pure base models.
donnyyou committed Jan 6, 2020
1 parent f49a6d4 commit 1e9a86a
Showing 3 changed files with 23 additions and 25 deletions.
24 changes: 7 additions & 17 deletions lib/model/base/deepbase_resnet.py
@@ -121,15 +121,14 @@ class DeepbaseResNet(nn.Module):
 
     def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                  groups=1, width_per_group=64, replace_stride_with_dilation=None,
-                 norm_layer=None, classifier=True):
+                 norm_layer=None):
         super(DeepbaseResNet, self).__init__()
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         self._norm_layer = norm_layer
 
         self.inplanes = 128
         self.dilation = 1
-        self.classifier = classifier
         if replace_stride_with_dilation is None:
             # each element in the tuple indicates if we should replace
             # the 2x2 stride with a dilated convolution instead
@@ -157,9 +156,8 @@ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                                        dilate=replace_stride_with_dilation[1])
         self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                        dilate=replace_stride_with_dilation[2])
-        if self.classifier:
-            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
-            self.fc = nn.Linear(512 * block.expansion, num_classes)
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(512 * block.expansion, num_classes)
 
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
@@ -203,7 +201,6 @@ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
         return nn.Sequential(*layers)
 
     def _forward_impl(self, x):
-        out_dict = dict()
         # See note [TorchScript super()]
         x = self.conv1(x)
         x = self.bn1(x)
@@ -218,21 +215,14 @@ def _forward_impl(self, x):
         x = self.maxpool(x)
 
         x = self.layer1(x)
-        out_dict['layer1'] = x
         x = self.layer2(x)
-        out_dict['layer2'] = x
         x = self.layer3(x)
-        out_dict['layer3'] = x
         x = self.layer4(x)
-        out_dict['layer4'] = x
-
-        if self.classifier:
-            x = self.avgpool(x)
-            x = torch.flatten(x, 1)
-            x = self.fc(x)
-            out_dict['fc'] = x
 
-        return out_dict
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+        x = self.fc(x)
+        return x
 
     def forward(self, x):
         return self._forward_impl(x)
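After this commit, DeepbaseResNet is a pure classification backbone: _forward_impl no longer accumulates per-stage outputs in an out_dict, and the avgpool/fc head is built and applied unconditionally, so forward returns a single logits tensor. A minimal usage sketch, assuming the block class follows the torchvision residual-block interface this file mirrors (the Bottleneck import below is an assumption, not shown in the diff):

    import torch
    # Assumption: torchvision's Bottleneck is compatible with this file's _make_layer.
    from torchvision.models.resnet import Bottleneck
    from lib.model.base.deepbase_resnet import DeepbaseResNet

    model = DeepbaseResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000]) -- a tensor, no longer a dict of stage outputs
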
13 changes: 5 additions & 8 deletions lib/model/base/densenet.py
@@ -143,11 +143,10 @@ class DenseNet(nn.Module):
     __constants__ = ['features']
 
     def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
-                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False, classifier=True):
+                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):
 
         super(DenseNet, self).__init__()
 
-        self.classifier = classifier
         # First convolution
         self.features = nn.Sequential(OrderedDict([
             ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
@@ -179,9 +178,8 @@ def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
         # Final batch norm
         self.features.add_module('norm5', nn.BatchNorm2d(num_features))
 
-        if self.classifier:
             # Linear layer
-            self.classifier = nn.Linear(num_features, num_classes)
+        self.classifier = nn.Linear(num_features, num_classes)
 
         # Official init from torch repo.
         for m in self.modules():
@@ -196,10 +194,9 @@ def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
 
     def forward(self, x):
         features = self.features(x)
         out = F.relu(features, inplace=True)
-        if self.classifer:
-            out = F.adaptive_avg_pool2d(out, (1, 1))
-            out = torch.flatten(out, 1)
-            out = self.classifier(out)
+        out = F.adaptive_avg_pool2d(out, (1, 1))
+        out = torch.flatten(out, 1)
+        out = self.classifier(out)
 
         return out
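The same simplification applies here: the classifier flag is gone, and forward always pools, flattens, and applies the Linear head. Note that the removed branch tested a misspelled self.classifer, so the conditional path would have raised an AttributeError at runtime anyway. A hedged sketch of the resulting behavior (constructor arguments taken from the signature in the diff; the defaults correspond to DenseNet-121):

    import torch
    from lib.model.base.densenet import DenseNet

    model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16),
                     num_init_features=64, num_classes=1000)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1000])
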
11 changes: 11 additions & 0 deletions lib/model/base/dfnet.py
@@ -74,6 +74,8 @@ def __init__(self, num_classes=1000):
         self.stage4 = self._make_layer(256, 3, stride=2)
         self.stage5 = self._make_layer(512, 1, stride=1)
         self.num_features = 512 * BasicBlock.expansion
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(512 * BasicBlock.expansion, num_classes)
 
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
@@ -107,6 +109,9 @@ def forward(self, x):
         x = self.stage3(x)   # 16x128
         x = self.stage4(x)   # 32x256
         x = self.stage5(x)   # 32x512
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+        x = self.fc(x)
         return x
 
 
@@ -131,6 +136,9 @@ def __init__(self, num_classes=1000):
         self.stage4_2 = self._make_layer(512, 2, stride=1)
         self.num_features = 512 * BasicBlock.expansion
 
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(512 * BasicBlock.expansion, num_classes)
+
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
@@ -165,6 +173,9 @@ def forward(self, x):
         x = self.stage3_2(x)   # 16x128
         x = self.stage4_1(x)   # 32x256
         x = self.stage4_2(x)   # 32x256
+        x = self.avgpool(x)
+        x = torch.flatten(x, 1)
+        x = self.fc(x)
         return x
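Unlike the first two files, dfnet.py gains a head rather than losing a flag: both DFNet variants previously returned raw stage features, and this commit appends the standard pool-flatten-linear sequence so they, too, emit class logits. The added head in isolation (a sketch; shapes assume the 512-channel final stage with BasicBlock.expansion == 1 and a stride-32 network on 224x224 input):

    import torch
    import torch.nn as nn

    avgpool = nn.AdaptiveAvgPool2d((1, 1))   # global average pooling, as added in __init__
    fc = nn.Linear(512, 1000)                # 512 * BasicBlock.expansion -> num_classes

    feat = torch.randn(2, 512, 7, 7)         # stand-in for the stage5 / stage4_2 output
    x = avgpool(feat)                        # (2, 512, 1, 1)
    x = torch.flatten(x, 1)                  # (2, 512)
    logits = fc(x)                           # (2, 1000)
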
