forked from bfortuner/pytorch-kaggle-starter
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlayers.py
31 lines (25 loc) · 933 Bytes
/
layers.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import torch.nn as nn
def conv_relu(in_channels, out_channels, kernel_size=3, stride=1,
              padding=1, bias=True):
    """Return a two-element list: a Conv2d followed by an in-place ReLU.

    The list form lets callers splice these modules into a larger
    ``nn.Sequential(*layers)`` alongside other blocks.
    """
    conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                     stride=stride, padding=padding, bias=bias)
    # inplace ReLU avoids allocating a second activation tensor
    return [conv, nn.ReLU(inplace=True)]
def conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, bias=False):
    """Return a [Conv2d, BatchNorm2d, ReLU] list for a standard conv block.

    ``bias`` defaults to False because the following BatchNorm2d has its
    own learned shift, making a convolution bias redundant.
    """
    block = [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                       stride=stride, padding=padding, bias=bias)]
    block.append(nn.BatchNorm2d(out_channels))
    block.append(nn.ReLU(inplace=True))
    return block
def linear_bn_relu_drop(in_channels, out_channels, dropout=0.5, bias=False):
    """Return a [Linear, BatchNorm1d, ReLU] list, plus Dropout if dropout > 0.

    ``bias`` defaults to False since BatchNorm1d supplies a learned shift.
    Pass ``dropout=0`` (or negative) to omit the Dropout module entirely.
    """
    tail = [nn.Dropout(dropout)] if dropout > 0 else []
    return [
        nn.Linear(in_channels, out_channels, bias=bias),
        nn.BatchNorm1d(out_channels),
        nn.ReLU(inplace=True),
    ] + tail