This repository was archived by the owner on Jul 22, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 83
/
Copy pathutils.py
120 lines (97 loc) · 4.49 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.optimize import linear_sum_assignment
from lapsolver import solve_dense
import time
# Module-level logging setup: attach a default handler to the root logger
# and raise its level to INFO so downstream modules share one configuration.
# NOTE(review): configuring the *root* logger at import time is a side effect
# on every importer — presumably intentional for this research codebase.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class SimpleCNNContainerConvBlocks(nn.Module):
    """Small CNN container exposing only the two conv blocks.

    Used to estimate the matched output of conv blocks when a CNN is
    instantiated with externally supplied dimensions.

    Args:
        input_channel: channel count of the input images.
        num_filters: list with the output-filter counts of the two conv layers.
        kernel_size: spatial kernel size shared by both conv layers.
        output_dim: kept for interface compatibility; not used by this
            container (no classifier head is built).

    Assumptions (from the original design):
        i)  exactly two conv layers are used;
        ii) both conv layers share the same kernel size.
    """

    def __init__(self, input_channel, num_filters, kernel_size, output_dim=10):
        super(SimpleCNNContainerConvBlocks, self).__init__()
        self.conv1 = nn.Conv2d(input_channel, num_filters[0], kernel_size)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size)

    def forward(self, x):
        # Two stages of conv -> ReLU -> 2x2 max-pool.
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        return out
class ModerateCNNContainerConvBlocks(nn.Module):
    """Conv-block stack of the "moderate" CNN for 3-channel (RGB) input.

    Builds three conv blocks whose widths come from ``num_filters`` and
    exposes only the convolutional part of the network (no classifier head).

    Args:
        num_filters: six output-channel counts, one per conv layer, in order.
        output_dim: kept for interface compatibility; not used here.
    """

    def __init__(self, num_filters, output_dim=10):
        super(ModerateCNNContainerConvBlocks, self).__init__()
        layers = []
        # Block 1: two 3x3 convs (padding 1) then a 2x2 max-pool.
        layers += [
            nn.Conv2d(in_channels=3, out_channels=num_filters[0], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=num_filters[0], out_channels=num_filters[1], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        # Block 2: same pattern, plus light spatial dropout afterwards.
        layers += [
            nn.Conv2d(in_channels=num_filters[1], out_channels=num_filters[2], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=num_filters[2], out_channels=num_filters[3], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
        ]
        # Block 3: final conv pair and pooling.
        layers += [
            nn.Conv2d(in_channels=num_filters[3], out_channels=num_filters[4], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=num_filters[4], out_channels=num_filters[5], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.conv_layer = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the conv stack and return the resulting feature map."""
        return self.conv_layer(x)
class ModerateCNNContainerConvBlocksMNIST(nn.Module):
    """Conv-block stack of the "moderate" CNN for 1-channel (MNIST) input.

    Identical layout to ``ModerateCNNContainerConvBlocks`` except the first
    conv layer reads a single input channel. Only the convolutional part of
    the network is exposed (no classifier head).

    Args:
        num_filters: six output-channel counts, one per conv layer, in order.
        output_dim: kept for interface compatibility; not used here.
    """

    def __init__(self, num_filters, output_dim=10):
        super(ModerateCNNContainerConvBlocksMNIST, self).__init__()
        layers = []
        # Block 1: two 3x3 convs (padding 1) then a 2x2 max-pool.
        layers += [
            nn.Conv2d(in_channels=1, out_channels=num_filters[0], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=num_filters[0], out_channels=num_filters[1], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        # Block 2: same pattern, plus light spatial dropout afterwards.
        layers += [
            nn.Conv2d(in_channels=num_filters[1], out_channels=num_filters[2], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=num_filters[2], out_channels=num_filters[3], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
        ]
        # Block 3: final conv pair and pooling.
        layers += [
            nn.Conv2d(in_channels=num_filters[3], out_channels=num_filters[4], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=num_filters[4], out_channels=num_filters[5], kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.conv_layer = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the conv stack and return the resulting feature map."""
        return self.conv_layer(x)
class LeNetContainer(nn.Module):
    """LeNet-style conv front-end with configurable filter counts.

    Only the two conv layers, each followed by 2x2 max-pooling, are applied;
    the ReLU activations were deliberately disabled in the original code and
    remain omitted here.

    Args:
        num_filters: output-channel counts for the two conv layers.
        kernel_size: spatial kernel size of both conv layers (default 5).
    """

    def __init__(self, num_filters, kernel_size=5):
        super(LeNetContainer, self).__init__()
        self.conv1 = nn.Conv2d(1, num_filters[0], kernel_size, 1)
        self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size, 1)

    def forward(self, x):
        # Stage 1 and 2: conv then 2x2 max-pool (no activation, matching the
        # commented-out ReLUs in the original implementation).
        out = F.max_pool2d(self.conv1(x), 2, 2)
        out = F.max_pool2d(self.conv2(out), 2, 2)
        return out