[Lora] fix lora fuse unfuse (huggingface#5003)
* fix lora fuse unfuse

* add same changes to loaders.py

* add test

---------

Co-authored-by: multimodalart <[email protected]>
patrickvonplaten and multimodalart authored Sep 13, 2023
1 parent 324aef6 commit b47f511
Showing 3 changed files with 42 additions and 4 deletions.
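For context before the diffs: the failure mode addressed here shows up when LoRAs are repeatedly loaded, fused, and unfused on the same pipeline. A rough user-level sketch of that sequence (mirroring the new test; checkpoint IDs taken from it) looks like this:

```python
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")

# Cycle through several LoRAs; before this fix, stale w_up/w_down state left
# behind by an earlier fuse/unfuse could break or corrupt later rounds.
for lora_id in ("Pclanglais/TintinIA", "ostris/crayon_style_lora_sdxl", "Pclanglais/TintinIA"):
    pipe.load_lora_weights(lora_id)
    pipe.fuse_lora()
    # ... run inference with the fused LoRA ...
    pipe.unfuse_lora()
```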
2 changes: 1 addition & 1 deletion src/diffusers/loaders.py
@@ -121,7 +121,7 @@ def _fuse_lora(self, lora_scale=1.0):
         self.lora_scale = lora_scale
 
     def _unfuse_lora(self):
-        if not (hasattr(self, "w_up") and hasattr(self, "w_down")):
+        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
             return
 
         fused_weight = self.regular_linear_layer.weight.data
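The point of the change: `hasattr` only checks that the attribute exists, but `_unfuse_lora` leaves `w_up`/`w_down` set to `None` rather than deleting them, so the old guard let a later call fall through and operate on `None`. A minimal, standalone illustration of the two guards (toy object, not the diffusers class):

```python
class Layer:
    pass

layer = Layer()
layer.w_up = None    # state a previous unfuse leaves behind: attribute present, value cleared
layer.w_down = None

print(hasattr(layer, "w_up") and hasattr(layer, "w_down"))          # True  -> old guard falls through
print(getattr(layer, "w_up", None) is not None
      and getattr(layer, "w_down", None) is not None)               # False -> new guard returns early
```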
4 changes: 2 additions & 2 deletions src/diffusers/models/lora.py
@@ -139,7 +139,7 @@ def _fuse_lora(self, lora_scale=1.0):
         self._lora_scale = lora_scale
 
     def _unfuse_lora(self):
-        if not (hasattr(self, "w_up") and hasattr(self, "w_down")):
+        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
             return
 
         fused_weight = self.weight.data
@@ -204,7 +204,7 @@ def _fuse_lora(self, lora_scale=1.0):
         self._lora_scale = lora_scale
 
     def _unfuse_lora(self):
-        if not (hasattr(self, "w_up") and hasattr(self, "w_down")):
+        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
             return
 
         fused_weight = self.weight.data
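In the `lora.py` layers the same guard sits at the top of `_unfuse_lora`, which subtracts the scaled LoRA update from the fused weight and then clears the cached factors. A simplified sketch of that shape (an assumed illustration, not the exact diffusers implementation) shows why `None` must be treated as "nothing to unfuse":

```python
import torch
import torch.nn as nn


class LoraFusableLinear(nn.Linear):
    """Hypothetical stand-in for a LoRA-compatible linear layer (illustration only)."""

    def _fuse_lora(self, w_up, w_down, lora_scale=1.0):
        # fold the low-rank update into the base weight and cache the factors
        self.w_up, self.w_down, self._lora_scale = w_up, w_down, lora_scale
        self.weight.data += lora_scale * torch.mm(w_up, w_down)

    def _unfuse_lora(self):
        # None (e.g. after a previous unfuse) must count as "nothing to undo"
        if not (getattr(self, "w_up", None) is not None and getattr(self, "w_down", None) is not None):
            return
        # subtract the scaled update that fusing added into the weight
        self.weight.data -= self._lora_scale * torch.mm(self.w_up, self.w_down)
        # the attributes stay defined but empty, which is exactly what tripped hasattr()
        self.w_up, self.w_down = None, None


layer = LoraFusableLinear(4, 4)
layer._fuse_lora(torch.randn(4, 2), torch.randn(2, 4), lora_scale=0.8)
layer._unfuse_lora()
layer._unfuse_lora()  # second call is a safe no-op with the new guard
```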
40 changes: 39 additions & 1 deletion tests/models/test_lora_layers.py
@@ -43,7 +43,7 @@
     LoRAAttnProcessor2_0,
     XFormersAttnProcessor,
 )
-from diffusers.utils.testing_utils import floats_tensor, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_gpu, slow, torch_device
 
 
 def create_unet_lora_layers(unet: nn.Module):
@@ -1497,3 +1497,41 @@ def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self):
         expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535])
 
         self.assertTrue(np.allclose(images, expected, atol=1e-3))
+
+    @nightly
+    def test_sequential_fuse_unfuse(self):
+        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
+
+        # 1. round
+        pipe.load_lora_weights("Pclanglais/TintinIA")
+        pipe.fuse_lora()
+
+        generator = torch.Generator().manual_seed(0)
+        images = pipe(
+            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
+        ).images
+        image_slice = images[0, -3:, -3:, -1].flatten()
+
+        pipe.unfuse_lora()
+
+        # 2. round
+        pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style")
+        pipe.fuse_lora()
+        pipe.unfuse_lora()
+
+        # 3. round
+        pipe.load_lora_weights("ostris/crayon_style_lora_sdxl")
+        pipe.fuse_lora()
+        pipe.unfuse_lora()
+
+        # 4. back to 1st round
+        pipe.load_lora_weights("Pclanglais/TintinIA")
+        pipe.fuse_lora()
+
+        generator = torch.Generator().manual_seed(0)
+        images_2 = pipe(
+            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
+        ).images
+        image_slice_2 = images_2[0, -3:, -3:, -1].flatten()
+
+        self.assertTrue(np.allclose(image_slice, image_slice_2, atol=1e-3))
