Skip to content

Commit

Permalink
Offload when lowvram and medvram are not added
Browse files Browse the repository at this point in the history
  • Loading branch information
Uminosachi committed Sep 11, 2023
1 parent 4650c8a commit a868cf8
Showing 1 changed file with 6 additions and 1 deletion.
7 changes: 6 additions & 1 deletion ia_threading.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,11 @@ def clear_cache():
devices.torch_gc()


def is_sdxl_lowvram(sd_model):
    """Return True if *sd_model* is an SDXL model running under a low-VRAM mode.

    A model is treated as SDXL when it exposes a ``conditioner`` attribute
    (only SDXL checkpoints have one); low-VRAM mode is any of the
    ``--lowvram``, ``--medvram``, or ``--medvram-sdxl`` command-line options.
    ``medvram_sdxl`` is read with ``getattr`` because older webui versions
    do not define that option.

    NOTE(review): the original expression relied on Python precedence
    (``and`` binds tighter than ``or``) and therefore evaluated as
    ``lowvram or medvram or (medvram_sdxl and has_conditioner)`` — returning
    True for non-SDXL models whenever --lowvram/--medvram was set. The
    explicit parentheses below group the three VRAM flags together so the
    SDXL check applies to all of them, matching the function's name.

    :param sd_model: loaded Stable Diffusion model object (or None).
    :return: bool — True only for SDXL models with a low-VRAM flag enabled.
    """
    lowvram_mode = (shared.cmd_opts.lowvram
                    or shared.cmd_opts.medvram
                    or getattr(shared.cmd_opts, "medvram_sdxl", False))
    return lowvram_mode and hasattr(sd_model, "conditioner")


def webui_reload_model_weights(sd_model=None, info=None):
try:
reload_model_weights(sd_model=sd_model, info=info)
Expand All @@ -26,7 +31,7 @@ def webui_reload_model_weights(sd_model=None, info=None):
def pre_offload_model_weights(sem):
global backup_sd_model, backup_device, backup_ckpt_info
with sem:
if shared.sd_model is not None and not getattr(shared.sd_model, "is_sdxl", False):
if shared.sd_model is not None and not is_sdxl_lowvram(shared.sd_model):
backup_sd_model = shared.sd_model
backup_device = getattr(backup_sd_model, "device", devices.device)
backup_sd_model.to(devices.cpu)
Expand Down

0 comments on commit a868cf8

Please sign in to comment.