From d03a22bf9960b8b2d3b2b364086264e89135115a Mon Sep 17 00:00:00 2001
From: Max Ren
Date: Mon, 24 Mar 2025 16:07:10 -0700
Subject: [PATCH 1/2] Reset Dynamo at Setup for all tests

---
 backends/xnnpack/test/models/deeplab_v3.py | 3 +++
 backends/xnnpack/test/models/edsr.py | 3 +++
 backends/xnnpack/test/models/emformer_rnnt.py | 3 +++
 backends/xnnpack/test/models/inception_v3.py | 3 +++
 backends/xnnpack/test/models/inception_v4.py | 3 +++
 backends/xnnpack/test/models/llama2_et_example.py | 3 +++
 backends/xnnpack/test/models/mobilebert.py | 3 +++
 backends/xnnpack/test/models/mobilenet_v2.py | 3 +++
 backends/xnnpack/test/models/mobilenet_v3.py | 3 +++
 backends/xnnpack/test/models/resnet.py | 3 +++
 backends/xnnpack/test/models/torchvision_vit.py | 3 +++
 backends/xnnpack/test/models/very_big_model.py | 3 +++
 backends/xnnpack/test/models/w2l.py | 3 +++
 backends/xnnpack/test/ops/test_abs.py | 3 +++
 backends/xnnpack/test/ops/test_add.py | 3 +++
 backends/xnnpack/test/ops/test_avgpool2d.py | 3 +++
 backends/xnnpack/test/ops/test_bilinear2d.py | 3 +++
 backends/xnnpack/test/ops/test_bmm.py | 3 +++
 backends/xnnpack/test/ops/test_cat.py | 3 +++
 backends/xnnpack/test/ops/test_ceil.py | 3 +++
 backends/xnnpack/test/ops/test_check_quant_params.py | 3 +++
 backends/xnnpack/test/ops/test_clamp.py | 3 +++
 backends/xnnpack/test/ops/test_conv1d.py | 3 +++
 backends/xnnpack/test/ops/test_conv2d.py | 3 +++
 backends/xnnpack/test/ops/test_div.py | 3 +++
 backends/xnnpack/test/ops/test_elu.py | 3 +++
 backends/xnnpack/test/ops/test_floor.py | 3 +++
 backends/xnnpack/test/ops/test_hardswish.py | 3 +++
 backends/xnnpack/test/ops/test_hardtanh.py | 3 +++
 backends/xnnpack/test/ops/test_leaky_relu.py | 3 +++
 backends/xnnpack/test/ops/test_linear.py | 3 +++
 backends/xnnpack/test/ops/test_lstm.py | 3 +++
 backends/xnnpack/test/ops/test_max_dim.py | 3 +++
 backends/xnnpack/test/ops/test_maximum.py | 3 +++
 backends/xnnpack/test/ops/test_maxpool2d.py | 3 +++
 backends/xnnpack/test/ops/test_mean_dim.py | 3 +++
 backends/xnnpack/test/ops/test_minimum.py | 3 +++
 backends/xnnpack/test/ops/test_multiply.py | 3 +++
 backends/xnnpack/test/ops/test_negate.py | 3 +++
 backends/xnnpack/test/ops/test_permute.py | 3 +++
 backends/xnnpack/test/ops/test_pow.py | 3 +++
 backends/xnnpack/test/ops/test_prelu.py | 3 +++
 backends/xnnpack/test/ops/test_quantize_per_tensor.py | 3 +++
 backends/xnnpack/test/ops/test_relu.py | 3 +++
 backends/xnnpack/test/ops/test_rsqrt.py | 3 +++
 backends/xnnpack/test/ops/test_sdpa.py | 3 +++
 backends/xnnpack/test/ops/test_sigmoid.py | 3 +++
 backends/xnnpack/test/ops/test_slice_copy.py | 3 +++
 backends/xnnpack/test/ops/test_softmax.py | 3 +++
 backends/xnnpack/test/ops/test_sqrt.py | 3 +++
 backends/xnnpack/test/ops/test_square.py | 3 +++
 backends/xnnpack/test/ops/test_static_constant_pad.py | 3 +++
 backends/xnnpack/test/ops/test_sub.py | 3 +++
 backends/xnnpack/test/passes/test_activation_fusion.py | 3 +++
 backends/xnnpack/test/passes/test_batch_norm_fusion.py | 3 +++
 .../xnnpack/test/passes/test_channels_last_tagged_reshape.py | 3 +++
 backends/xnnpack/test/passes/test_convert_to_linear.py | 3 +++
 backends/xnnpack/test/passes/test_decompose_cat_pass.py | 3 +++
 backends/xnnpack/test/passes/test_remove_get_item_pass.py | 3 +++
 backends/xnnpack/test/passes/test_tag_implicit_q_dq_pass.py | 3 +++
 60 files changed, 180 insertions(+)

diff --git a/backends/xnnpack/test/models/deeplab_v3.py b/backends/xnnpack/test/models/deeplab_v3.py
index 9913296521d..c47832b63d1 100644
--- a/backends/xnnpack/test/models/deeplab_v3.py
+++ b/backends/xnnpack/test/models/deeplab_v3.py
@@ -23,6 +23,9 @@ def forward(self, *args):
 class TestDeepLabV3(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     dl3 = DL3Wrapper()
     dl3 = dl3.eval()
     model_inputs = (torch.randn(1, 3, 224, 224),)
diff --git a/backends/xnnpack/test/models/edsr.py b/backends/xnnpack/test/models/edsr.py
index 34b5898cf41..138ea62ddf5 100644
--- a/backends/xnnpack/test/models/edsr.py
+++ b/backends/xnnpack/test/models/edsr.py
@@ -14,6 +14,9 @@
 class TestEDSR(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     edsr = edsr_r16f64(2, False).eval()  # noqa
     model_inputs = (torch.randn(1, 3, 224, 224),)
diff --git a/backends/xnnpack/test/models/emformer_rnnt.py b/backends/xnnpack/test/models/emformer_rnnt.py
index 5cf4337307c..d5125361def 100644
--- a/backends/xnnpack/test/models/emformer_rnnt.py
+++ b/backends/xnnpack/test/models/emformer_rnnt.py
@@ -13,6 +13,9 @@
 class TestEmformerModel(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class EmformerRnnt(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/models/inception_v3.py b/backends/xnnpack/test/models/inception_v3.py
index 59fd56d6af7..d5d6d086206 100644
--- a/backends/xnnpack/test/models/inception_v3.py
+++ b/backends/xnnpack/test/models/inception_v3.py
@@ -13,6 +13,9 @@
 class TestInceptionV3(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     ic3 = models.inception_v3(weights="IMAGENET1K_V1").eval()  # noqa
     model_inputs = (torch.randn(1, 3, 224, 224),)
diff --git a/backends/xnnpack/test/models/inception_v4.py b/backends/xnnpack/test/models/inception_v4.py
index e8a785116a3..c5239bb2dd3 100644
--- a/backends/xnnpack/test/models/inception_v4.py
+++ b/backends/xnnpack/test/models/inception_v4.py
@@ -12,6 +12,9 @@
 class TestInceptionV4(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     ic4 = inception_v4(pretrained=False).eval()
     model_inputs = (torch.randn(3, 299, 299).unsqueeze(0),)
diff --git a/backends/xnnpack/test/models/llama2_et_example.py b/backends/xnnpack/test/models/llama2_et_example.py
index f1dce43c3c9..378f9dd3d48 100644
--- a/backends/xnnpack/test/models/llama2_et_example.py
+++ b/backends/xnnpack/test/models/llama2_et_example.py
@@ -13,6 +13,9 @@
 class TestLlama2ETExample(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     def test_f32(self):
         self._test()
diff --git a/backends/xnnpack/test/models/mobilebert.py b/backends/xnnpack/test/models/mobilebert.py
index ca18e6c265d..57c099e87d1 100644
--- a/backends/xnnpack/test/models/mobilebert.py
+++ b/backends/xnnpack/test/models/mobilebert.py
@@ -12,6 +12,9 @@
 class TestMobilebert(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     # pyre-ignore
     mobilebert = MobileBertModel(MobileBertConfig()).eval()
     example_inputs = (torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]]),)
diff --git a/backends/xnnpack/test/models/mobilenet_v2.py b/backends/xnnpack/test/models/mobilenet_v2.py
index 4ee28af6b95..2ff93303d50 100644
--- a/backends/xnnpack/test/models/mobilenet_v2.py
+++ b/backends/xnnpack/test/models/mobilenet_v2.py
@@ -14,6 +14,9 @@
 class TestMobileNetV2(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     mv2 = models.mobilenetv2.mobilenet_v2(weights=MobileNet_V2_Weights)
     mv2 = mv2.eval()
     model_inputs = (torch.randn(1, 3, 224, 224),)
diff --git a/backends/xnnpack/test/models/mobilenet_v3.py b/backends/xnnpack/test/models/mobilenet_v3.py
index cacd8b5cc87..f64b7352b7f 100644
--- a/backends/xnnpack/test/models/mobilenet_v3.py
+++ b/backends/xnnpack/test/models/mobilenet_v3.py
@@ -13,6 +13,9 @@
 class TestMobileNetV3(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     mv3 = models.mobilenetv3.mobilenet_v3_small(pretrained=True)
     mv3 = mv3.eval()
     model_inputs = (torch.randn(1, 3, 224, 224),)
diff --git a/backends/xnnpack/test/models/resnet.py b/backends/xnnpack/test/models/resnet.py
index 4ad6a7d5f47..9f4989e1724 100644
--- a/backends/xnnpack/test/models/resnet.py
+++ b/backends/xnnpack/test/models/resnet.py
@@ -13,6 +13,9 @@
 class TestResNet18(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     inputs = (torch.randn(1, 3, 224, 224),)
     dynamic_shapes = (
         {
diff --git a/backends/xnnpack/test/models/torchvision_vit.py b/backends/xnnpack/test/models/torchvision_vit.py
index 6bebd284e53..f9153032cd8 100644
--- a/backends/xnnpack/test/models/torchvision_vit.py
+++ b/backends/xnnpack/test/models/torchvision_vit.py
@@ -12,6 +12,9 @@
 class TestViT(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     vit = models.vision_transformer.vit_b_16(weights="IMAGENET1K_V1")
     vit = vit.eval()
     model_inputs = (torch.randn(1, 3, 224, 224),)
diff --git a/backends/xnnpack/test/models/very_big_model.py b/backends/xnnpack/test/models/very_big_model.py
index 3545287c628..f4f10f1611c 100644
--- a/backends/xnnpack/test/models/very_big_model.py
+++ b/backends/xnnpack/test/models/very_big_model.py
@@ -11,6 +11,9 @@
 class TestVeryBigModel(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class BigModel(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/models/w2l.py b/backends/xnnpack/test/models/w2l.py
index 07b3bf56b32..216fb7a89f0 100644
--- a/backends/xnnpack/test/models/w2l.py
+++ b/backends/xnnpack/test/models/w2l.py
@@ -12,6 +12,9 @@
 class TestW2L(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     batch_size = 10
     input_frames = 700
     vocab_size = 4096
diff --git a/backends/xnnpack/test/ops/test_abs.py b/backends/xnnpack/test/ops/test_abs.py
index a41bee47470..48feaafb0e1 100644
--- a/backends/xnnpack/test/ops/test_abs.py
+++ b/backends/xnnpack/test/ops/test_abs.py
@@ -11,6 +11,9 @@
 class TestAbs(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Abs(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_add.py b/backends/xnnpack/test/ops/test_add.py
index 29a87df1303..2416879f5ce 100644
--- a/backends/xnnpack/test/ops/test_add.py
+++ b/backends/xnnpack/test/ops/test_add.py
@@ -11,6 +11,9 @@
 class TestAdd(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Add(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_avgpool2d.py b/backends/xnnpack/test/ops/test_avgpool2d.py
index b471fd914c2..c1f149e5a93 100644
--- a/backends/xnnpack/test/ops/test_avgpool2d.py
+++ b/backends/xnnpack/test/ops/test_avgpool2d.py
@@ -11,6 +11,9 @@
 class TestAvgPool2d(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class AvgPool2d(torch.nn.Module):
         def __init__(
             self, count_include_pad=False, ceil_mode=False, divisor_override=None
diff --git a/backends/xnnpack/test/ops/test_bilinear2d.py b/backends/xnnpack/test/ops/test_bilinear2d.py
index 24c990d6bb1..1fd3c147328 100644
--- a/backends/xnnpack/test/ops/test_bilinear2d.py
+++ b/backends/xnnpack/test/ops/test_bilinear2d.py
@@ -14,6 +14,9 @@
 class TestUpsampleBilinear2d(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class StaticResizeBilinear2dModule(torch.nn.Module):
         def forward(self, x):
             a = torch.nn.functional.interpolate(
diff --git a/backends/xnnpack/test/ops/test_bmm.py b/backends/xnnpack/test/ops/test_bmm.py
index 1c6235e5f7e..a029738e771 100644
--- a/backends/xnnpack/test/ops/test_bmm.py
+++ b/backends/xnnpack/test/ops/test_bmm.py
@@ -11,6 +11,9 @@
 class TestBMM(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class BMM(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_cat.py b/backends/xnnpack/test/ops/test_cat.py
index dd551ea3fa7..11e246f541a 100644
--- a/backends/xnnpack/test/ops/test_cat.py
+++ b/backends/xnnpack/test/ops/test_cat.py
@@ -13,6 +13,9 @@
 class TestCat(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Cat(torch.nn.Module):
         def __init__(self, dim=0):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_ceil.py b/backends/xnnpack/test/ops/test_ceil.py
index 9caee15ad5b..717df6f47e6 100644
--- a/backends/xnnpack/test/ops/test_ceil.py
+++ b/backends/xnnpack/test/ops/test_ceil.py
@@ -11,6 +11,9 @@
 class TestCeil(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Ceil(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_check_quant_params.py b/backends/xnnpack/test/ops/test_check_quant_params.py
index cd18568afba..9923e24c86a 100644
--- a/backends/xnnpack/test/ops/test_check_quant_params.py
+++ b/backends/xnnpack/test/ops/test_check_quant_params.py
@@ -14,6 +14,9 @@
 class TestCheckQuantParams(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     def create_invalid_value_injector(
         self, invalid_value, is_per_channel=False, is_zp=False
     ):
diff --git a/backends/xnnpack/test/ops/test_clamp.py b/backends/xnnpack/test/ops/test_clamp.py
index 9fb8935553e..671d9372e18 100644
--- a/backends/xnnpack/test/ops/test_clamp.py
+++ b/backends/xnnpack/test/ops/test_clamp.py
@@ -11,6 +11,9 @@
 class TestClamp(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Clamp(torch.nn.Module):
         def __init__(self, min_val=None, max_val=None):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_conv1d.py b/backends/xnnpack/test/ops/test_conv1d.py
index b4c8c414929..036500b29d5 100644
--- a/backends/xnnpack/test/ops/test_conv1d.py
+++ b/backends/xnnpack/test/ops/test_conv1d.py
@@ -19,6 +19,9 @@
 class TestConv1d(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Conv1d(torch.nn.Module):
         def __init__(self, dtype: torch.dtype = torch.float):
             groups = 1
diff --git a/backends/xnnpack/test/ops/test_conv2d.py b/backends/xnnpack/test/ops/test_conv2d.py
index d3e5db8df2d..80b731bd18e 100644
--- a/backends/xnnpack/test/ops/test_conv2d.py
+++ b/backends/xnnpack/test/ops/test_conv2d.py
@@ -170,6 +170,9 @@ def get_inputs(self):
 class TestConv2d(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     def _test(
         self,
         m: torch.nn.Module,
diff --git a/backends/xnnpack/test/ops/test_div.py b/backends/xnnpack/test/ops/test_div.py
index 9bca5feed48..b53c59df8e1 100644
--- a/backends/xnnpack/test/ops/test_div.py
+++ b/backends/xnnpack/test/ops/test_div.py
@@ -11,6 +11,9 @@
 class TestDiv(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Div(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_elu.py b/backends/xnnpack/test/ops/test_elu.py
index f976c29d799..68a0c687779 100644
--- a/backends/xnnpack/test/ops/test_elu.py
+++ b/backends/xnnpack/test/ops/test_elu.py
@@ -11,6 +11,9 @@
 class TestElu(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class ELU(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_floor.py b/backends/xnnpack/test/ops/test_floor.py
index dfbe7fb18c0..5c543fd0534 100644
--- a/backends/xnnpack/test/ops/test_floor.py
+++ b/backends/xnnpack/test/ops/test_floor.py
@@ -11,6 +11,9 @@
 class TestFloor(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Floor(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_hardswish.py b/backends/xnnpack/test/ops/test_hardswish.py
index 899a119ed44..561551fc433 100644
--- a/backends/xnnpack/test/ops/test_hardswish.py
+++ b/backends/xnnpack/test/ops/test_hardswish.py
@@ -11,6 +11,9 @@
 class TestHardswish(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Hardswish(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_hardtanh.py b/backends/xnnpack/test/ops/test_hardtanh.py
index e35e840e3c3..6f2914010c7 100644
--- a/backends/xnnpack/test/ops/test_hardtanh.py
+++ b/backends/xnnpack/test/ops/test_hardtanh.py
@@ -11,6 +11,9 @@
 class TestHardTanh(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class HardTanh(torch.nn.Module):
         def __init__(self, min_val=-1.0, max_val=1.0):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_leaky_relu.py b/backends/xnnpack/test/ops/test_leaky_relu.py
index 32f73486977..c0921ddf3ad 100644
--- a/backends/xnnpack/test/ops/test_leaky_relu.py
+++ b/backends/xnnpack/test/ops/test_leaky_relu.py
@@ -11,6 +11,9 @@
 class TestLeakyRelu(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class LeakyReLU(torch.nn.Module):
         def __init__(self, **kwargs):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_linear.py b/backends/xnnpack/test/ops/test_linear.py
index cf9473180bb..849a1b237e8 100644
--- a/backends/xnnpack/test/ops/test_linear.py
+++ b/backends/xnnpack/test/ops/test_linear.py
@@ -219,6 +219,9 @@ class TestLinear(unittest.TestCase):
     should produce strictly better results compared to Per-Tensor Quantization
     """
+    def setUp(self):
+        torch._dynamo.reset()
+
     @staticmethod
     def _get_4b_dqconfig() -> QuantizationConfig:
         # Returns a QuantizationConfig for 4b dynamic quantization for XNNPACK.
diff --git a/backends/xnnpack/test/ops/test_lstm.py b/backends/xnnpack/test/ops/test_lstm.py
index 6c174b16f33..db4deb9aae4 100644
--- a/backends/xnnpack/test/ops/test_lstm.py
+++ b/backends/xnnpack/test/ops/test_lstm.py
@@ -14,6 +14,9 @@
 class TestLSTM(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class LSTMLinear(torch.nn.Module):
         def __init__(self, input_size, hidden_size, out_size):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_max_dim.py b/backends/xnnpack/test/ops/test_max_dim.py
index c660a5a6d26..f209845372e 100644
--- a/backends/xnnpack/test/ops/test_max_dim.py
+++ b/backends/xnnpack/test/ops/test_max_dim.py
@@ -11,6 +11,9 @@
 class TestMaxDim(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Max(torch.nn.Module):
         def forward(self, x):
             max_values_1, max_indices_1 = torch.max(x, dim=2, keepdim=True)
diff --git a/backends/xnnpack/test/ops/test_maximum.py b/backends/xnnpack/test/ops/test_maximum.py
index 30dfa5503a9..c594452631c 100644
--- a/backends/xnnpack/test/ops/test_maximum.py
+++ b/backends/xnnpack/test/ops/test_maximum.py
@@ -11,6 +11,9 @@
 class TestMaximum(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Maximum(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_maxpool2d.py b/backends/xnnpack/test/ops/test_maxpool2d.py
index 521235232a2..f82b27b09ec 100644
--- a/backends/xnnpack/test/ops/test_maxpool2d.py
+++ b/backends/xnnpack/test/ops/test_maxpool2d.py
@@ -15,6 +15,9 @@
 class TestMaxPool2d(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class MaxPool2d(torch.nn.Module):
         def __init__(self, kernel_size=3, stride=1, padding=0, dilation=1):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_mean_dim.py b/backends/xnnpack/test/ops/test_mean_dim.py
index 3bac5f3239c..81a93c3e97e 100644
--- a/backends/xnnpack/test/ops/test_mean_dim.py
+++ b/backends/xnnpack/test/ops/test_mean_dim.py
@@ -11,6 +11,9 @@
 class TestMeanDim(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class MeanDim(torch.nn.Module):
         def __init__(self, dims):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_minimum.py b/backends/xnnpack/test/ops/test_minimum.py
index 406ac8485e5..fe1af3de5ab 100644
--- a/backends/xnnpack/test/ops/test_minimum.py
+++ b/backends/xnnpack/test/ops/test_minimum.py
@@ -11,6 +11,9 @@
 class TestMinimum(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Minimum(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_multiply.py b/backends/xnnpack/test/ops/test_multiply.py
index db50bc5dd44..3315200005d 100644
--- a/backends/xnnpack/test/ops/test_multiply.py
+++ b/backends/xnnpack/test/ops/test_multiply.py
@@ -11,6 +11,9 @@
 class TestMul(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Mul(torch.nn.Module):
         def forward(self, x, y):
             z = x * y
diff --git a/backends/xnnpack/test/ops/test_negate.py b/backends/xnnpack/test/ops/test_negate.py
index 4d158612e97..5022255e484 100644
--- a/backends/xnnpack/test/ops/test_negate.py
+++ b/backends/xnnpack/test/ops/test_negate.py
@@ -11,6 +11,9 @@
 class TestNegate(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Negate(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_permute.py b/backends/xnnpack/test/ops/test_permute.py
index b348fc8af6d..2991ba1773d 100644
--- a/backends/xnnpack/test/ops/test_permute.py
+++ b/backends/xnnpack/test/ops/test_permute.py
@@ -11,6 +11,9 @@
 class TestPermute(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Permute(torch.nn.Module):
         def __init__(self, dims):
             self.dims = dims
diff --git a/backends/xnnpack/test/ops/test_pow.py b/backends/xnnpack/test/ops/test_pow.py
index ac902ae44be..2accb010210 100644
--- a/backends/xnnpack/test/ops/test_pow.py
+++ b/backends/xnnpack/test/ops/test_pow.py
@@ -11,6 +11,9 @@
 class TestPow(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Pow(torch.nn.Module):
         def __init__(self, exp):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_prelu.py b/backends/xnnpack/test/ops/test_prelu.py
index f73648dfa25..47b2851278c 100644
--- a/backends/xnnpack/test/ops/test_prelu.py
+++ b/backends/xnnpack/test/ops/test_prelu.py
@@ -11,6 +11,9 @@
 class TestPrelu(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class PReLU(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_quantize_per_tensor.py b/backends/xnnpack/test/ops/test_quantize_per_tensor.py
index c2117987536..9e876c09671 100644
--- a/backends/xnnpack/test/ops/test_quantize_per_tensor.py
+++ b/backends/xnnpack/test/ops/test_quantize_per_tensor.py
@@ -13,6 +13,9 @@
 class TestQuantizePerTensor(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     def test_qs8_quantize_per_tensor(self):
         class Quant(torch.nn.Module):
             def forward(self, x):
diff --git a/backends/xnnpack/test/ops/test_relu.py b/backends/xnnpack/test/ops/test_relu.py
index 8672b1d3e4e..508c1ceeffa 100644
--- a/backends/xnnpack/test/ops/test_relu.py
+++ b/backends/xnnpack/test/ops/test_relu.py
@@ -11,6 +11,9 @@
 class TestRelu(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Relu(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_rsqrt.py b/backends/xnnpack/test/ops/test_rsqrt.py
index e5d704a0467..5405e966359 100644
--- a/backends/xnnpack/test/ops/test_rsqrt.py
+++ b/backends/xnnpack/test/ops/test_rsqrt.py
@@ -11,6 +11,9 @@
 class TestRsqrt(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Rsqrt(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_sdpa.py b/backends/xnnpack/test/ops/test_sdpa.py
index de5c7174ab5..205b6d4ab36 100644
--- a/backends/xnnpack/test/ops/test_sdpa.py
+++ b/backends/xnnpack/test/ops/test_sdpa.py
@@ -15,6 +15,9 @@
 class TestSDPA(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class SDPA(torch.nn.Module):
         def __init__(self, scale: Optional[float] = None):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_sigmoid.py b/backends/xnnpack/test/ops/test_sigmoid.py
index a9acd4df6db..fe55f0f1ef5 100644
--- a/backends/xnnpack/test/ops/test_sigmoid.py
+++ b/backends/xnnpack/test/ops/test_sigmoid.py
@@ -11,6 +11,9 @@
 class TestSigmoid(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Sigmoid(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_slice_copy.py b/backends/xnnpack/test/ops/test_slice_copy.py
index 8ff37368578..ea65571b1e8 100644
--- a/backends/xnnpack/test/ops/test_slice_copy.py
+++ b/backends/xnnpack/test/ops/test_slice_copy.py
@@ -11,6 +11,9 @@
 class TestSliceCopy(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     def _test_slice_copy(self, module, inputs, copy_count=1, edge_copy_count=1):
         (
             Tester(module, inputs)
diff --git a/backends/xnnpack/test/ops/test_softmax.py b/backends/xnnpack/test/ops/test_softmax.py
index f909e8ce5f2..bf078860ba5 100644
--- a/backends/xnnpack/test/ops/test_softmax.py
+++ b/backends/xnnpack/test/ops/test_softmax.py
@@ -11,6 +11,9 @@
 class TestSoftmax(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Softmax(torch.nn.Module):
         def __init__(self, dim):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_sqrt.py b/backends/xnnpack/test/ops/test_sqrt.py
index eaeb3b9f700..ee800c62568 100644
--- a/backends/xnnpack/test/ops/test_sqrt.py
+++ b/backends/xnnpack/test/ops/test_sqrt.py
@@ -11,6 +11,9 @@
 class TestSqrt(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Sqrt(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_square.py b/backends/xnnpack/test/ops/test_square.py
index 32a19639343..c7a567239bb 100644
--- a/backends/xnnpack/test/ops/test_square.py
+++ b/backends/xnnpack/test/ops/test_square.py
@@ -11,6 +11,9 @@
 class TestSquare(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Square(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_static_constant_pad.py b/backends/xnnpack/test/ops/test_static_constant_pad.py
index b1b41afe8cf..c5d103f596a 100644
--- a/backends/xnnpack/test/ops/test_static_constant_pad.py
+++ b/backends/xnnpack/test/ops/test_static_constant_pad.py
@@ -11,6 +11,9 @@
 class TestStaticConstantPad(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class StaticConstantPadFunctional(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/ops/test_sub.py b/backends/xnnpack/test/ops/test_sub.py
index fb3d3d3f948..06219730ddb 100644
--- a/backends/xnnpack/test/ops/test_sub.py
+++ b/backends/xnnpack/test/ops/test_sub.py
@@ -11,6 +11,9 @@
 class TestSub(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Sub(torch.nn.Module):
         def __init__(self):
             super().__init__()
diff --git a/backends/xnnpack/test/passes/test_activation_fusion.py b/backends/xnnpack/test/passes/test_activation_fusion.py
index 5f340f61b2e..6a1182dc7fb 100644
--- a/backends/xnnpack/test/passes/test_activation_fusion.py
+++ b/backends/xnnpack/test/passes/test_activation_fusion.py
@@ -16,6 +16,9 @@
 class TestActivationFusion(unittest.TestCase):
     PassStage = RunPasses([ConvertToLinearPass, FuseActivationPass])
+    def setUp(self):
+        torch._dynamo.reset()
+
     def check_node_has_tag(self, graph_module, node_target, tag):
         for n in graph_module.graph.nodes:
             if n.op == "call_function" and n.target == node_target:
diff --git a/backends/xnnpack/test/passes/test_batch_norm_fusion.py b/backends/xnnpack/test/passes/test_batch_norm_fusion.py
index 59d0e0a2072..70c93c3751b 100644
--- a/backends/xnnpack/test/passes/test_batch_norm_fusion.py
+++ b/backends/xnnpack/test/passes/test_batch_norm_fusion.py
@@ -18,6 +18,9 @@
 class TestBatchNormFusion(unittest.TestCase):
     PassStage = RunPasses([FuseBatchNormWithConvPass])
     bn_name = "executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default"
+    def setUp(self):
+        torch._dynamo.reset()
+
     class ModelConvBN(torch.nn.Module):
         def __init__(
             self,
diff --git a/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py b/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py
index c1438b29213..6d60f9d76b5 100644
--- a/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py
+++ b/backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py
@@ -17,6 +17,9 @@
 class TestChannelsLastTaggedReshapePass(unittest.TestCase):
+    def setUp(self):
+        torch._dynamo.reset()
+
     PassStage = RunPasses([ChannelsLastTaggedReshapePass])
     # Dictionary mapping modules to expected number of reshapes
     modules = {
diff --git a/backends/xnnpack/test/passes/test_convert_to_linear.py b/backends/xnnpack/test/passes/test_convert_to_linear.py
index a07f8cf61ce..0ed002cae34 100644
--- a/backends/xnnpack/test/passes/test_convert_to_linear.py
+++ b/backends/xnnpack/test/passes/test_convert_to_linear.py
@@ -14,6 +14,9 @@
 class TestConvertToLinear(unittest.TestCase):
     PassStage = RunPasses([ConvertToLinearPass])
+    def setUp(self):
+        torch._dynamo.reset()
+
     def test_fp32_convert_to_linear(self):
         in_sizes = [1, 4, 4]
         input_sizes = [4, 37, 17]
diff --git a/backends/xnnpack/test/passes/test_decompose_cat_pass.py b/backends/xnnpack/test/passes/test_decompose_cat_pass.py
index beb1761aec8..38537a99c4d 100644
--- a/backends/xnnpack/test/passes/test_decompose_cat_pass.py
+++ b/backends/xnnpack/test/passes/test_decompose_cat_pass.py
@@ -16,6 +16,9 @@
 class TestDecomposeCatPass(unittest.TestCase):
     PassStage = RunPasses([DecomposeConcatenate])
     cat_name = "executorch_exir_dialects_edge__ops_aten_cat_default"
+    def setUp(self):
+        torch._dynamo.reset()
+
     class Cat(torch.nn.Module):
         def forward(self, *args):
             xs = [*args]
diff --git a/backends/xnnpack/test/passes/test_remove_get_item_pass.py b/backends/xnnpack/test/passes/test_remove_get_item_pass.py
index 2365c9bba0c..4d71d61afd7 100644
--- a/backends/xnnpack/test/passes/test_remove_get_item_pass.py
+++ b/backends/xnnpack/test/passes/test_remove_get_item_pass.py
@@ -16,6 +16,9 @@
 class TestRemoveGetItemPass(unittest.TestCase):
     max_pool2d_name = "executorch_exir_dialects_edge__ops_aten_max_pool2d_default"
     amax_name = "executorch_exir_dialects_edge__ops_aten_amax_default"
+    def setUp(self):
+        torch._dynamo.reset()
+
     class MaxPool2dModule(torch.nn.Module):
         def __init__(
             self,
diff --git a/backends/xnnpack/test/passes/test_tag_implicit_q_dq_pass.py b/backends/xnnpack/test/passes/test_tag_implicit_q_dq_pass.py
index 05d1ac9e8b6..6fec7726835 100644
--- a/backends/xnnpack/test/passes/test_tag_implicit_q_dq_pass.py
+++ b/backends/xnnpack/test/passes/test_tag_implicit_q_dq_pass.py
@@ -20,6 +20,9 @@
 class TestTagImplicitQDq(unittest.TestCase):
     PassStage = RunPasses([DuplicateDequantNodePass, TagImplicitQDqPass])
+    def setUp(self):
+        torch._dynamo.reset()
+
     class QDqModule(torch.nn.Module):
         def __init__(self):
             super().__init__()

From 54170190a253c9ebcfdafa73be4fb054d871f88d Mon Sep 17 00:00:00 2001
From: Max Ren
Date: Mon, 24 Mar 2025 16:59:24 -0700
Subject: [PATCH 2/2] Reset Dynamo in for loops

---
 backends/xnnpack/test/ops/test_check_quant_params.py | 1 +
 backends/xnnpack/test/passes/test_convert_to_linear.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/backends/xnnpack/test/ops/test_check_quant_params.py b/backends/xnnpack/test/ops/test_check_quant_params.py
index 9923e24c86a..b76935a9f72 100644
--- a/backends/xnnpack/test/ops/test_check_quant_params.py
+++ b/backends/xnnpack/test/ops/test_check_quant_params.py
@@ -49,6 +49,7 @@ def inject_invalid_scale_in_per_tensor(aten):
         return inject_invalid_scale_in_per_tensor

     def _test_check_quant_message(self, ep_modifier, expected_message):
+        torch._dynamo.reset()
         mod = torch.nn.Linear(10, 10)
         quantizer = XNNPACKQuantizer()
         captured = export_for_training(mod, (torch.randn(1, 10),)).module()
diff --git a/backends/xnnpack/test/passes/test_convert_to_linear.py b/backends/xnnpack/test/passes/test_convert_to_linear.py
index 0ed002cae34..0e7bc7d01c4 100644
--- a/backends/xnnpack/test/passes/test_convert_to_linear.py
+++ b/backends/xnnpack/test/passes/test_convert_to_linear.py
@@ -24,6 +24,7 @@ def test_fp32_convert_to_linear(self):
         bias_vals = [True, True, False]

         for i, _ in enumerate(in_sizes):
+            torch._dynamo.reset()
            in_size = int(in_sizes[i])
            input_size = int(input_sizes[i])
            output_size = int(output_sizes[i])
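
For reference only (not part of the applied patches): the pattern this series adds is equivalent to the following minimal sketch. The test-case name MyOpTest is hypothetical; torch._dynamo.reset() is the real PyTorch API the patches call, and it clears TorchDynamo's cached compilation state so each test starts without leftovers from earlier tests.

    import unittest

    import torch


    class MyOpTest(unittest.TestCase):  # hypothetical test case illustrating the pattern
        def setUp(self):
            # Clear TorchDynamo caches/guards before every test method runs.
            torch._dynamo.reset()

        def test_something(self):
            # Any export/compile done here now starts from a clean Dynamo state.
            pass

The second patch applies the same call inside per-iteration loops (and a parameterized helper) where a single setUp reset is not enough, since each loop iteration re-exports a fresh module.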