norm not needed when reusing attention in lookvit
lucidrains committed Jul 19, 2024
1 parent 547bf94 commit ec6c48b
Showing 2 changed files with 2 additions and 2 deletions.
setup.py (2 changes: 1 addition & 1 deletion)

@@ -6,7 +6,7 @@
 setup(
   name = 'vit-pytorch',
   packages = find_packages(exclude=['examples']),
-  version = '1.7.1',
+  version = '1.7.2',
   license='MIT',
   description = 'Vision Transformer (ViT) - Pytorch',
   long_description=long_description,
vit_pytorch/look_vit.py (2 changes: 1 addition & 1 deletion)

@@ -77,7 +77,7 @@ def __init__(
 
         self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
 
-        self.norm = LayerNorm(dim)
+        self.norm = LayerNorm(dim) if not reuse_attention else nn.Identity()
         self.attend = nn.Softmax(dim = -1)
         self.dropout = nn.Dropout(dropout)
 
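
Why the norm can be dropped: the pre-attention LayerNorm on the input feeds the query/key projection that produces the similarity matrix. When `reuse_attention` is set, the layer is presumably handed an attention matrix computed by an earlier layer and never projects the input to queries or keys, so that norm is dead computation and `nn.Identity()` is the cheaper equivalent. Below is a minimal sketch of that pattern, not the repository's exact module: `reuse_attention`, `split_heads`, `attend`, `dropout`, and the LayerNorm-vs-Identity switch come from the diff, while `ReusableAttention`, `to_qk`, `norm_context`, and the `attn` argument are illustrative assumptions.

```python
# Minimal sketch, assuming a cross-attention layer that either computes its own
# attention or reuses a matrix supplied by an earlier layer. Only the
# `reuse_attention` flag and the LayerNorm/Identity switch are taken from the
# commit; everything else is illustrative.
import torch
from torch import nn
from einops.layers.torch import Rearrange

class ReusableAttention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., reuse_attention = False):
        super().__init__()
        dim_inner = heads * dim_head
        self.reuse_attention = reuse_attention
        self.scale = dim_head ** -0.5

        # the input norm only feeds the query/key projection, so it becomes a
        # no-op when the attention matrix is reused (the change in this commit)
        self.norm = nn.LayerNorm(dim) if not reuse_attention else nn.Identity()
        self.norm_context = nn.LayerNorm(dim)   # illustrative: keep values well scaled

        # no query/key projection is needed when reusing attention
        self.to_qk = nn.Linear(dim, dim_inner * 2, bias = False) if not reuse_attention else None
        self.to_v = nn.Linear(dim, dim_inner, bias = False)

        self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
        self.merge_heads = Rearrange('b h n d -> b n (h d)')

        self.attend = nn.Softmax(dim = -1)
        self.dropout = nn.Dropout(dropout)
        self.to_out = nn.Linear(dim_inner, dim, bias = False)

    def forward(self, x, context, attn = None):
        x = self.norm(x)                           # identity on the reuse path
        context = self.norm_context(context)

        v = self.split_heads(self.to_v(context))   # values come from the context here

        if self.reuse_attention:
            # x's only consumer was the query projection, which is skipped on
            # this path, so normalizing x would have bought nothing
            assert attn is not None, 'pass the attention matrix from an earlier layer'
        else:
            q, k = self.to_qk(x).chunk(2, dim = -1)
            q, k = map(self.split_heads, (q, k))
            sim = torch.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
            attn = self.attend(sim)

        attn = self.dropout(attn)
        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        return self.to_out(self.merge_heads(out))
```

In a LookViT-style block, the matrix passed as `attn` would be the similarity returned by the earlier cross-attention (possibly transposed for the reverse direction); that wiring is not visible in this diff, so treat it as an assumption of the sketch.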
