Commit

Add check for psutil
brkirch authored and AUTOMATIC1111 committed Oct 11, 2022
1 parent c0484f1 commit 98fd5cd
Showing 2 changed files with 23 additions and 6 deletions.
modules/sd_hijack.py (10 changes: 8 additions & 2 deletions)
@@ -10,6 +10,7 @@
 import modules.textual_inversion.textual_inversion
 from modules import prompt_parser, devices, sd_hijack_optimizations, shared
 from modules.shared import opts, device, cmd_opts
+from modules.sd_hijack_optimizations import invokeAI_mps_available
 
 import ldm.modules.attention
 import ldm.modules.diffusionmodules.model
@@ -31,8 +32,13 @@ def apply_optimizations():
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
print("Applying cross attention optimization (InvokeAI).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
if not invokeAI_mps_available and shared.device.type == 'mps':
print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
else:
print("Applying cross attention optimization (InvokeAI).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
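The hunk above changes behavior only on Apple's MPS backend: when psutil is missing there, the InvokeAI optimization is skipped in favor of the v1 split-attention path, with a message explaining why. A minimal sketch of that selection logic (choose_attention_impl is a hypothetical helper, not part of this commit):

# Hypothetical distillation of the branch added above, for illustration only.
def choose_attention_impl(device_type: str, invokeAI_mps_available: bool) -> str:
    if not invokeAI_mps_available and device_type == 'mps':
        # The InvokeAI path sizes attention slices from system RAM via psutil;
        # without psutil on MPS, fall back to the v1 implementation.
        return 'split_cross_attention_forward_v1'
    return 'split_cross_attention_forward_invokeAI'
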
modules/sd_hijack_optimizations.py (19 changes: 15 additions & 4 deletions)
@@ -1,7 +1,7 @@
 import math
 import sys
 import traceback
-import psutil
+import importlib
 
 import torch
 from torch import einsum
@@ -117,9 +117,20 @@ def split_cross_attention_forward(self, x, context=None, mask=None):

     return self.to_out(r2)
 
-# -- From https://github.com/invoke-ai/InvokeAI/blob/main/ldm/modules/attention.py (with hypernetworks support added) --
 
-mem_total_gb = psutil.virtual_memory().total // (1 << 30)
+def check_for_psutil():
+    try:
+        spec = importlib.util.find_spec('psutil')
+        return spec is not None
+    except ModuleNotFoundError:
+        return False
+
+invokeAI_mps_available = check_for_psutil()
+
+# -- Taken from https://github.com/invoke-ai/InvokeAI --
+if invokeAI_mps_available:
+    import psutil
+    mem_total_gb = psutil.virtual_memory().total // (1 << 30)
 
 def einsum_op_compvis(q, k, v):
     s = einsum('b i d, b j d -> b i j', q, k)
@@ -193,7 +204,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
     r = einsum_op(q, k, v)
     return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))
 
-# -- End of code from https://github.com/invoke-ai/InvokeAI/blob/main/ldm/modules/attention.py --
+# -- End of code from https://github.com/invoke-ai/InvokeAI --
 
 def xformers_attention_forward(self, x, context=None, mask=None):
     h = self.heads
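
The check_for_psutil helper shows a general pattern for probing an optional dependency without importing it: importlib.util.find_spec returns None when a top-level module is absent, and raises ModuleNotFoundError only when a parent package of a dotted name is missing. A standalone sketch of the same pattern (module_available is a hypothetical name; note the explicit import importlib.util, since import importlib alone does not guarantee the util submodule is loaded):

import importlib.util

def module_available(name: str) -> bool:
    """Report whether `name` can be imported, without actually importing it."""
    try:
        return importlib.util.find_spec(name) is not None
    except ModuleNotFoundError:
        # Raised for dotted names whose parent package is missing.
        return False

print(module_available('psutil'))  # True only if psutil is installed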
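The guarded mem_total_gb line converts psutil's byte count to whole gibibytes: psutil.virtual_memory().total reports system RAM in bytes, and 1 << 30 is 2**30, one GiB. A quick sketch, assuming psutil is installed (the printed value is illustrative):

import psutil  # assumed installed for this sketch

total_bytes = psutil.virtual_memory().total  # total system RAM in bytes
mem_total_gb = total_bytes // (1 << 30)      # floor-divide by 2**30 bytes per GiB
print(mem_total_gb)                          # e.g. 16 on a 16 GiB machine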
