forked from lucidrains/vit-pytorch
Commit 6db20de
1 parent 1bae5d3
Showing 4 changed files with 205 additions and 3 deletions.
@@ -0,0 +1,144 @@
import torch
from torch import nn

from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce

# helpers
def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d

def pair(t):
    return t if isinstance(t, tuple) else (t, t)
# patch merger class

class PatchMerger(nn.Module):
    def __init__(self, dim, num_tokens_out):
        super().__init__()
        self.scale = dim ** -0.5
        self.norm = nn.LayerNorm(dim)
        self.queries = nn.Parameter(torch.randn(num_tokens_out, dim))

    def forward(self, x):
        x = self.norm(x)
        sim = torch.matmul(self.queries, x.transpose(-1, -2)) * self.scale
        attn = sim.softmax(dim = -1)
        return torch.matmul(attn, x)
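# shape sketch for PatchMerger (illustrative; the values below are assumed,
# not part of the diff): a learned set of num_tokens_out queries attends over
# the incoming tokens and pools them down to a fixed count.
#
#   merger = PatchMerger(dim = 512, num_tokens_out = 8)
#   tokens = torch.randn(2, 256, 512)   # (batch, num_tokens_in, dim)
#   merged = merger(tokens)             # (2, 8, 512)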
# classes

class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim, dropout = 0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)
class Attention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim = -1)
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()

    def forward(self, x):
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)

        dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale

        attn = self.attend(dots)

        out = torch.matmul(attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)
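# shape sketch for Attention.forward (illustrative; values assumed): with
# heads = 8 and dim_head = 64, an input of (b, n, dim) produces q, k, v of
# (b, 8, n, 64), attention weights of (b, 8, n, n), and an output that is
# projected back to (b, n, dim).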
class Transformer(nn.Module):
    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., patch_merge_layer = None, patch_merge_num_tokens = 8):
        super().__init__()
        self.layers = nn.ModuleList([])

        self.patch_merge_layer_index = default(patch_merge_layer, depth // 2) - 1 # default to mid-way through transformer, as shown in paper
        self.patch_merger = PatchMerger(dim = dim, num_tokens_out = patch_merge_num_tokens)

        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
            ]))

    def forward(self, x):
        for index, (attn, ff) in enumerate(self.layers):
            x = attn(x) + x
            x = ff(x) + x

            if index == self.patch_merge_layer_index:
                x = self.patch_merger(x)

        return x
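# worked example for the merge point (illustrative; values assumed): with
# depth = 12 and patch_merge_layer = None, default(...) yields 6 and the
# stored index becomes 5, so tokens are merged right after the 6th block;
# passing patch_merge_layer = 4 would merge after the 4th block instead.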
class ViT(nn.Module):
    def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, patch_merge_layer = None, patch_merge_num_tokens = 8, channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
        super().__init__()
        image_height, image_width = pair(image_size)
        patch_height, patch_width = pair(patch_size)

        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'

        num_patches = (image_height // patch_height) * (image_width // patch_width)
        patch_dim = channels * patch_height * patch_width

        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
            nn.Linear(patch_dim, dim),
        )

        # no CLS token is used (pooling is by mean); only the first num_patches positions are indexed in forward
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
        self.dropout = nn.Dropout(emb_dropout)

        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout, patch_merge_layer, patch_merge_num_tokens)

        self.mlp_head = nn.Sequential(
            Reduce('b n d -> b d', 'mean'),
            nn.LayerNorm(dim),
            nn.Linear(dim, num_classes)
        )
    def forward(self, img):
        x = self.to_patch_embedding(img)
        b, n, _ = x.shape

        x += self.pos_embedding[:, :n]
        x = self.dropout(x)

        x = self.transformer(x)

        return self.mlp_head(x)
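# minimal usage sketch (illustrative; every hyperparameter below is an
# assumption chosen only to demonstrate the expected shapes)
if __name__ == '__main__':
    v = ViT(
        image_size = 256,
        patch_size = 16,
        num_classes = 1000,
        dim = 1024,
        depth = 12,
        heads = 8,
        mlp_dim = 2048,
        patch_merge_layer = 6,        # merge tokens after the 6th transformer block
        patch_merge_num_tokens = 8,   # pool the patch tokens down to 8
        dropout = 0.1,
        emb_dropout = 0.1
    )

    img = torch.randn(4, 3, 256, 256)
    preds = v(img)   # (4, 1000)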