diff --git a/src/diffusers/loaders.py b/src/diffusers/loaders.py
index 51814a611a00..2bec9a9ab349 100644
--- a/src/diffusers/loaders.py
+++ b/src/diffusers/loaders.py
@@ -121,7 +121,7 @@ def _fuse_lora(self, lora_scale=1.0):
         self.lora_scale = lora_scale
 
     def _unfuse_lora(self):
-        if not (hasattr(self, "w_up") and hasattr(self, "w_down")):
+        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
             return
 
         fused_weight = self.regular_linear_layer.weight.data
diff --git a/src/diffusers/models/lora.py b/src/diffusers/models/lora.py
index 834a7051b06d..cc8e3e231e2b 100644
--- a/src/diffusers/models/lora.py
+++ b/src/diffusers/models/lora.py
@@ -139,7 +139,7 @@ def _fuse_lora(self, lora_scale=1.0):
         self._lora_scale = lora_scale
 
     def _unfuse_lora(self):
-        if not (hasattr(self, "w_up") and hasattr(self, "w_down")):
+        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
             return
 
         fused_weight = self.weight.data
@@ -204,7 +204,7 @@ def _fuse_lora(self, lora_scale=1.0):
         self._lora_scale = lora_scale
 
     def _unfuse_lora(self):
-        if not (hasattr(self, "w_up") and hasattr(self, "w_down")):
+        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
             return
 
         fused_weight = self.weight.data
diff --git a/tests/models/test_lora_layers.py b/tests/models/test_lora_layers.py
index 9affb37aa5d6..ef6ade9af5c1 100644
--- a/tests/models/test_lora_layers.py
+++ b/tests/models/test_lora_layers.py
@@ -43,7 +43,7 @@
     LoRAAttnProcessor2_0,
     XFormersAttnProcessor,
 )
-from diffusers.utils.testing_utils import floats_tensor, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_gpu, slow, torch_device
 
 
 def create_unet_lora_layers(unet: nn.Module):
@@ -1497,3 +1497,41 @@ def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self):
         expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535])
 
         self.assertTrue(np.allclose(images, expected, atol=1e-3))
+
+    @nightly
+    def test_sequential_fuse_unfuse(self):
+        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
+
+        # 1. round
+        pipe.load_lora_weights("Pclanglais/TintinIA")
+        pipe.fuse_lora()
+
+        generator = torch.Generator().manual_seed(0)
+        images = pipe(
+            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
+        ).images
+        image_slice = images[0, -3:, -3:, -1].flatten()
+
+        pipe.unfuse_lora()
+
+        # 2. round
+        pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style")
+        pipe.fuse_lora()
+        pipe.unfuse_lora()
+
+        # 3. round
+        pipe.load_lora_weights("ostris/crayon_style_lora_sdxl")
+        pipe.fuse_lora()
+        pipe.unfuse_lora()
+
+        # 4. back to 1st round
+        pipe.load_lora_weights("Pclanglais/TintinIA")
+        pipe.fuse_lora()
+
+        generator = torch.Generator().manual_seed(0)
+        images_2 = pipe(
+            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
+        ).images
+        image_slice_2 = images_2[0, -3:, -3:, -1].flatten()
+
+        self.assertTrue(np.allclose(image_slice, image_slice_2, atol=1e-3))