From 05edfff33ce0185538b6864aa08b39c2cef12280 Mon Sep 17 00:00:00 2001
From: Phil Wang
Date: Sat, 20 Feb 2021 11:32:38 -0800
Subject: [PATCH] cleanup

---
 vit_pytorch/vit_pytorch.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/vit_pytorch/vit_pytorch.py b/vit_pytorch/vit_pytorch.py
index 56a5f8a..3919955 100644
--- a/vit_pytorch/vit_pytorch.py
+++ b/vit_pytorch/vit_pytorch.py
@@ -6,8 +6,6 @@ from einops import rearrange, repeat
 from einops.layers.torch import Rearrange
 
-MIN_NUM_PATCHES = 16
-
 class Residual(nn.Module):
     def __init__(self, fn):
         super().__init__()
         self.fn = fn
@@ -92,7 +90,6 @@ def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, ml
         assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
         num_patches = (image_size // patch_size) ** 2
         patch_dim = channels * patch_size ** 2
-        assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for attention to be effective (at least 16). Try decreasing your patch size'
         assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
 
         self.to_patch_embedding = nn.Sequential(
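
Note on the effect of this patch: with MIN_NUM_PATCHES and its assert removed, the ViT
constructor no longer rejects configurations that produce 16 or fewer patches. Below is a
minimal sketch of a configuration that the removed assert would previously have raised on;
it assumes the constructor keywords shown in the diff plus mlp_dim (the hunk header is
truncated at "ml"), and the 64/32 sizes and head/dim values are illustrative only, not
taken from this patch.

    import torch
    from vit_pytorch import ViT

    # A 64 x 64 image with 32 x 32 patches gives (64 // 32) ** 2 = 4 patches.
    # Before this patch, the assert `num_patches > MIN_NUM_PATCHES` (16) rejected this;
    # after it, construction proceeds.
    v = ViT(
        image_size = 64,
        patch_size = 32,
        num_classes = 10,
        dim = 128,
        depth = 4,
        heads = 4,
        mlp_dim = 256
    )

    img = torch.randn(1, 3, 64, 64)   # (batch, channels, height, width)
    preds = v(img)                    # -> tensor of shape (1, 10)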