-
Notifications
You must be signed in to change notification settings - Fork 13
/
Copy pathlayers.py
112 lines (94 loc) · 3.98 KB
/
layers.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
def bottleNeck(nin, nmid):
    """Pre-activation bottleneck stack: (BN -> ReLU -> conv) three times.

    1x1 conv reduces to `nmid` channels, a 3x3 conv stays at `nmid`, and a
    final 1x1 conv expands to `nmid * 4` channels. Every conv is stride 1
    with matching padding, so the spatial size is unchanged.
    """
    # (in_channels, out_channels, kernel_size, padding) per conv stage
    stage_specs = [
        (nin, nmid, 1, 0),
        (nmid, nmid, 3, 1),
        (nmid, nmid * 4, 1, 0),
    ]
    stages = []
    for cin, cout, k, pad in stage_specs:
        stages.append(nn.BatchNorm2d(cin))
        stages.append(nn.ReLU())
        stages.append(nn.Conv2d(cin, cout, kernel_size=k, stride=1, padding=pad))
    return nn.Sequential(*stages)
# NOTE(review): orphaned class fragment — these lines reference `self`, so they
# belonged to an nn.Module subclass whose `class` header and the rest of its
# `__init__` were lost when this file was extracted (indentation was also
# stripped). As written at module level they would raise NameError on `self`
# at import time. `forward` adds an identity skip connection around
# `resBlock`: out = resBlock(x) + x. TODO: recover the enclosing class
# definition from the original source before relying on this file.
self.resBlock = nn.Sequential()

def forward(self, input):
    out = self.resBlock(input)
    return out + input
    #return F.leaky_relu(out + input, 0.2)
def convBatch(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, dilation=1):
    """Convolution followed by BatchNorm2d and a PReLU activation.

    `layer` lets the caller substitute another conv-like constructor that
    accepts the same keyword arguments as nn.Conv2d.
    """
    body = layer(nin, nout, kernel_size=kernel_size, stride=stride,
                 padding=padding, bias=bias, dilation=dilation)
    # PReLU is used here; a LeakyReLU(0.2) variant existed previously.
    return nn.Sequential(body, nn.BatchNorm2d(nout), nn.PReLU())
def downSampleConv(nin, nout, kernel_size=3, stride=2, padding=1, bias=False):
    """Strided convBatch stage (stride 2 by default) that halves spatial size."""
    down = convBatch(nin, nout, kernel_size=kernel_size, stride=stride,
                     padding=padding, bias=bias)
    return nn.Sequential(down)
def upSampleConv(nin, nout, kernel_size=3, upscale=2, padding=1, bias=False):
    """Upsample by `upscale`, then refine with two convBatch stages.

    The first conv maps nin -> nout channels; the second stays at nout with
    a fixed 3x3 kernel.
    """
    stages = [nn.Upsample(scale_factor=upscale)]
    stages.append(convBatch(nin, nout, kernel_size=kernel_size, stride=1,
                            padding=padding, bias=bias))
    stages.append(convBatch(nout, nout, kernel_size=3, stride=1, padding=1,
                            bias=bias))
    return nn.Sequential(*stages)
def conv(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d,
         BN=False, ws=False, activ=nn.LeakyReLU(0.2), gainWS=2):
    """Configurable conv stage: [WScaleLayer] -> conv -> [BatchNorm] -> [activation].

    Pass `activ=nn.PReLU` (the class, not an instance) to get a fresh PReLU
    per call; passing an instance shares that module wherever the caller
    reuses it. `activ=None` disables the activation entirely.
    NOTE(review): the default LeakyReLU instance is created once at definition
    time and shared by all default callers; LeakyReLU holds no parameters, so
    this is harmless, but it is the same pitfall the PReLU convention avoids.
    """
    body = layer(nin, nout, kernel_size, stride=stride, padding=padding, bias=bias)
    modules = [body]
    if ws:
        # weight-scale wrapper is placed before the convolution it was built from
        modules.insert(0, WScaleLayer(body, gain=gainWS))
    if BN:
        modules.append(nn.BatchNorm2d(nout))
    if activ is not None:
        # class vs. instance: the class gets fresh parameters here
        modules.append(activ(num_parameters=1) if activ == nn.PReLU else activ)
    return nn.Sequential(*modules)
class ResidualConv(nn.Module):
    """Two conv stages with a (possibly projected) skip connection.

    The residual path is the identity when nin == nout, otherwise a 1x1
    projection built with the same BN/ws settings. The activation runs only
    after the residual sum, so the second main-branch stage is built with
    activ=None. The `activ` convention matches conv(): pass the nn.PReLU
    class for per-layer parameters, an instance to share it, or None.
    """

    def __init__(self, nin, nout, bias=False, BN=False, ws=False, activ=nn.LeakyReLU(0.2)):
        super(ResidualConv, self).__init__()
        self.convs = nn.Sequential(
            conv(nin, nout, bias=bias, BN=BN, ws=ws, activ=activ),
            conv(nout, nout, bias=bias, BN=BN, ws=ws, activ=None),
        )
        # 1x1 projection only when channel counts differ; otherwise identity
        shortcut = []
        if nin != nout:
            shortcut.append(conv(nin, nout, kernel_size=1, padding=0,
                                 bias=False, BN=BN, ws=ws, activ=None))
        self.res = nn.Sequential(*shortcut)
        post = []
        if activ is not None:
            if activ == nn.PReLU:
                # the class (not an instance) means: fresh parameters here
                post.append(activ(num_parameters=1))
            else:
                # an instance is shared wherever the caller reuses it
                post.append(activ)
        self.activation = nn.Sequential(*post)

    def forward(self, input):
        main = self.convs(input)
        return self.activation(main + self.res(input))
class residualConv(nn.Module):
    """convBatch stage plus conv+BN, merged with a skip path and a final
    LeakyReLU(0.2).

    When nin != nout the skip path projects channels with a 1x1 conv + BN;
    otherwise it is the identity.
    """

    def __init__(self, nin, nout):
        super(residualConv, self).__init__()
        self.convs = nn.Sequential(
            convBatch(nin, nout),
            nn.Conv2d(nout, nout, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(nout),
        )
        if nin != nout:
            # channel projection so the skip can be summed with the main branch
            self.res = nn.Sequential(
                nn.Conv2d(nin, nout, kernel_size=1, bias=False),
                nn.BatchNorm2d(nout),
            )
        else:
            self.res = nn.Sequential()

    def forward(self, input):
        branch = self.convs(input)
        return F.leaky_relu(branch + self.res(input), 0.2)