diff --git a/tests/models/test_models_prior.py b/tests/models/test_models_prior.py
index ca27b6ff057f..896a75de6f1b 100644
--- a/tests/models/test_models_prior.py
+++ b/tests/models/test_models_prior.py
@@ -164,7 +164,7 @@ def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     @parameterized.expand(
         [
diff --git a/tests/models/test_models_unet_2d_condition.py b/tests/models/test_models_unet_2d_condition.py
index 80f59734b5ce..35ea33328c1d 100644
--- a/tests/models/test_models_unet_2d_condition.py
+++ b/tests/models/test_models_unet_2d_condition.py
@@ -869,7 +869,7 @@ def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
         dtype = torch.float16 if fp16 else torch.float32
diff --git a/tests/models/test_models_vae.py b/tests/models/test_models_vae.py
index df34a48da3aa..a4ff31706be7 100644
--- a/tests/models/test_models_vae.py
+++ b/tests/models/test_models_vae.py
@@ -485,7 +485,7 @@ def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_file_format(self, seed, shape):
         return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
@@ -565,7 +565,7 @@ def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
         dtype = torch.float16 if fp16 else torch.float32
@@ -820,7 +820,7 @@ def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
         dtype = torch.float16 if fp16 else torch.float32
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
index 7459d5a6b617..81e85efe953c 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
@@ -310,7 +310,7 @@ class StableDiffusion2PipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"
@@ -531,7 +531,7 @@ class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        backend_empty_cache()
+        backend_empty_cache(torch_device)

     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"
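Note: every call site above now passes the active torch_device string into backend_empty_cache so the tearDown cleanup can target whichever accelerator the test ran on. As a minimal sketch only (the function name and the device-string argument come from the diff; the body is an assumption for illustration, not the actual diffusers helper), such a dispatcher could look like this:

import torch

def backend_empty_cache(device: str) -> None:
    # Illustrative assumption: release the cached allocator memory of the
    # backend named by `device`; the real test utility may handle more backends.
    if device.startswith("cuda") and torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif device == "mps" and torch.backends.mps.is_available():
        torch.mps.empty_cache()
    # CPU-only runs have no allocator cache to clear, so nothing to do here.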