Skip to content

Commit

Permalink
update cnn
Browse files Browse the repository at this point in the history
  • Loading branch information
Yazhou-Z committed Oct 4, 2021
1 parent ce68b5e commit 74ab3be
Showing 1 changed file with 3 additions and 165 deletions.
168 changes: 3 additions & 165 deletions CNN_multichannel.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import torch.nn.functional as F
from torch.autograd import Variable


def read_data(data="kaggle_data.xlsx", n = 0):
resArray = []
data = xlrd.open_workbook(data)
Expand Down Expand Up @@ -42,6 +43,7 @@ def read_data(data="kaggle_data.xlsx", n = 0):


class Cnn1d(nn.Module):

def __init__(self, in_channels, out_channels, n_len_seg, n_classes, device, verbose=False):
super(Cnn1d, self).__init__()
self.n_len_seg = n_len_seg
Expand Down Expand Up @@ -133,173 +135,11 @@ def forward(self, x):

return logit

class Net(nn.Module):
    """Four-layer fully-connected classifier.

    Layer widths: in_dim -> n_hidden_1 -> n_hidden_2 -> n_hidden_3 -> out_dim.
    forward() accepts anything torch.as_tensor can convert (list, ndarray,
    tensor) and returns raw logits of shape (..., out_dim), suitable for
    nn.CrossEntropyLoss.
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, n_hidden_3, out_dim):
        super(Net, self).__init__()
        self.layer1 = nn.Linear(in_dim, n_hidden_1)
        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.layer3 = nn.Linear(n_hidden_2, n_hidden_3)
        self.layer4 = nn.Linear(n_hidden_3, out_dim)

    def forward(self, x):
        # as_tensor avoids the copy (and UserWarning) that torch.tensor()
        # performs when x is already a tensor.
        x = torch.as_tensor(x).to(torch.float32)
        # BUG FIX: the original chained three Linear layers with no
        # activation between them (which collapses to a single affine map)
        # and applied ReLU to the *output* layer, zeroing negative logits
        # before CrossEntropyLoss. ReLU now follows each hidden layer and
        # the final layer returns unclipped logits.
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = F.relu(self.layer3(x))
        return self.layer4(x)


# --- Training configuration ---------------------------------------------
batch_size = 1        # samples are fed one at a time (see the loop below)
learning_rate = 0.0001
num_epoches = 50      # [sic] spelling kept; total passes over Xtrain

# NOTE(review): Cnn1d's visible signature is
# (in_channels, out_channels, n_len_seg, n_classes, device, verbose=False),
# so the fifth positional argument 8 binds to `device` — confirm intended.
model = Cnn1d(80, 400, 200, 50, 8)

if torch.cuda.is_available():
    print('cuda')
    model = model.cuda()

# criterion is built here but the loop below constructs a fresh
# CrossEntropyLoss on every step instead of reusing this one.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# train
# NOTE(review): Xtrain/Ytrain are produced by the train_test_split further
# down in this file — verify execution order before running top-to-bottom.
epoch = 0
while epoch < num_epoches:
    for i in range(len(Xtrain)):
        datas = Xtrain[i]   # one sample per step (batch_size == 1)
        label = Ytrain[i]
        if torch.cuda.is_available():
            datas = datas.cuda()
            label = label.cuda()

        out = model(datas)
        out = torch.unsqueeze(out, 0)   # add batch dim for CrossEntropyLoss

        # CrossEntropyLoss requires integer class indices with a batch dim.
        label = torch.tensor(label, dtype=torch.long)
        label = torch.unsqueeze(label, 0)

        loss = torch.nn.CrossEntropyLoss()(out, label)

        data = [datas, label]           # unused; kept as in original
        print_loss = loss.data.item()

        # standard SGD step: clear grads, backprop, update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print('epoch: {}, loss: {:.4}'.format(epoch, print_loss), 'step: ', i + 1)

    epoch += 1
    if epoch % 10 == 0:
        print('epoch: {}, loss: {:.4}'.format(epoch, print_loss))


# empty triple-quoted string pair below is a no-op separator left in the file
'''
'''

def load_excel(path):
    """Load the first sheet of an Excel workbook, one sample per column.

    For each column, every cell except the last is treated as a feature and
    converted to float; the last cell is the label, mapped to 1 when it
    equals 1 and to 0 otherwise.

    Returns a pair (X, y): X is a float ndarray of shape
    (n_columns, n_rows - 1) and y is a list of 0/1 int labels.
    """
    book = xlrd.open_workbook(path)
    sheet = book.sheet_by_index(0)

    # One list per column; each column is a complete sample.
    columns = [sheet.col_values(col) for col in range(sheet.ncols)]
    raw = np.array(columns)

    features = []
    labels = []
    for row in raw:
        # Coerce every feature cell to float in place (the final astype
        # below does the real numeric conversion of the collected rows).
        for idx, cell in enumerate(row[:-1]):
            row[idx] = float(cell)
        features.append(row[:-1])
        labels.append(1 if row[-1] == 1 else 0)

    return np.array(features).astype(float), labels


def manage(X, spare):
    """Quantize every value of the 2-D collection X in place.

    Each element v is replaced by (v // spare) * spare + spare, i.e. floored
    to a multiple of `spare` and then bumped up by one full step, so every
    result is a positive multiple of `spare` strictly greater than v - spare.

    X is mutated in place and also returned for convenience.
    """
    for row in X:
        for idx, value in enumerate(row):
            row[idx] = (value // spare) * spare + spare
    return X


def expand_data(X, y, size):
    """Split every sample of X into `size` strided sub-samples.

    For sample l and offset i (0 <= i < size), the sub-sample collects
    X[l][(j - 1) * size + i] for j in range(len(X[l]) // size).

    NOTE(review): for j == 0 the index (0 - 1) * size + i is negative and
    wraps to the END of the row — possibly meant to be j * size + i.
    Preserved exactly as the original behaves.

    Labels are binarized (0 stays 0, anything else becomes 1) and repeated
    once per generated sub-sample.

    Returns (new, new_label) as numpy arrays.
    """
    expanded = []
    expanded_labels = []
    for pos, row in enumerate(X):
        binary = 0 if y[pos] == 0 else 1
        chunks = len(row) // size
        for offset in range(size):
            expanded.append([row[(j - 1) * size + offset] for j in range(chunks)])
            expanded_labels.append(binary)
    return np.array(expanded), np.array(expanded_labels)


# --- Data preparation ---------------------------------------------------
path = 'bu_data_for_ML.xlsx'
X, y = load_excel(path)            # one sample per spreadsheet column

print(X.shape)
y = np.array(y)

X = manage(X, 10)                  # quantize features to multiples of 10
X, y = expand_data(X, y, 4)        # split each sample into 4 sub-samples
X = np.array(X)
y = np.array(y)
print(X.shape)
# X = StandardScaler().fit_transform(X)
print(X)

# 70/30 split; fixed random_state makes the split reproducible
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, y, test_size=0.3, random_state=420)


class Net(nn.Module):
    """Four-layer fully-connected classifier.

    NOTE(review): this REDEFINES the Net class declared earlier in the file;
    the later definition wins, so it is the one used below — consider
    removing the duplicate.

    Layer widths: in_dim -> n_hidden_1 -> n_hidden_2 -> n_hidden_3 -> out_dim.
    forward() accepts anything torch.as_tensor can convert (list, ndarray,
    tensor) and returns raw logits of shape (..., out_dim), suitable for
    nn.CrossEntropyLoss.
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, n_hidden_3, out_dim):
        super(Net, self).__init__()
        self.layer1 = nn.Linear(in_dim, n_hidden_1)
        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.layer3 = nn.Linear(n_hidden_2, n_hidden_3)
        self.layer4 = nn.Linear(n_hidden_3, out_dim)

    def forward(self, x):
        # as_tensor avoids the copy (and UserWarning) that torch.tensor()
        # performs when x is already a tensor.
        x = torch.as_tensor(x).to(torch.float32)
        # BUG FIX: the original chained three Linear layers with no
        # activation between them (which collapses to a single affine map)
        # and applied ReLU to the *output* layer, zeroing negative logits
        # before CrossEntropyLoss. ReLU now follows each hidden layer and
        # the final layer returns unclipped logits.
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = F.relu(self.layer3(x))
        return self.layer4(x)


# --- Model setup for the fully-connected Net ----------------------------
batch_size = 1        # samples fed individually in the loop further below
learning_rate = 0.0001
num_epoches = 50      # [sic] spelling kept

# 26 input features, three hidden layers (400/200/50), 2 output classes.
# NOTE(review): earlier prep reshapes X with expand_data(X, y, 4) — confirm
# the resulting feature length actually equals 26.
model = Net(26, 400, 200, 50, 2)

if torch.cuda.is_available():
    print('cuda')
    model = model.cuda()
Expand Down Expand Up @@ -363,6 +203,4 @@ def forward(self, x):
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
eval_loss / (len(Xtest)),
eval_acc / (len(Xtest))
))

'''
))

0 comments on commit 74ab3be

Please sign in to comment.