@@ -0,0 +1,100 @@
import torch
import torch.nn as nn
import argparse
import pprint as pp


def model_builder(args):
    Cin = int(args['Cin'])
    Cout1 = int(args['Cout1'])
    Cout2 = int(args['Cout2'])
    Cout3 = int(args['Cout3'])
    Lin_lstm = int(args['Lin_lstm'])
    lstm_hidden_size = int(args['lstm_hidden_size'])
    lstm_num_layers = int(args['lstm_num_layers'])
    batch_size = int(args['batch_size'])
    Nlabels = int(args['Nlabels'])
    torch.set_default_dtype(torch.float32)

    class pred_model(nn.Module):
        def __init__(self, bidirectional_flag):
            super(pred_model, self).__init__()
            self.bidirectional = bidirectional_flag
            self.cnn_layer_1 = nn.Conv2d(Cin, Cout1, kernel_size=3)
            self.cnn_layer_2 = nn.Conv2d(Cout1, Cout2, kernel_size=2)
            self.cnn_layer_3 = nn.Conv2d(Cout2, Cout3, kernel_size=2)
            Lin_fc = 6272  # hard-coded flattened size of the conv output for a 32x32 input
            self.FC = nn.Linear(Lin_fc, Lin_lstm)

            self.LSTM_1 = nn.LSTM(input_size=Lin_lstm, hidden_size=lstm_hidden_size,
                                  num_layers=lstm_num_layers, bidirectional=bidirectional_flag,
                                  batch_first=True)

            if not bidirectional_flag:
                self.Linear_out = nn.Linear(lstm_hidden_size, Nlabels)
            else:
                self.Linear_out = nn.Linear(lstm_hidden_size * 2, Nlabels)
            self.logprob = nn.LogSoftmax(dim=2)

            self.hidden_state, self.cell_state = self.init_hidden(batch=1)

        def init_hidden(self, batch=batch_size):
            m = 2 if self.bidirectional else 1
            # shape: (num_layers * num_directions, batch, hidden_size)
            return (torch.zeros(lstm_num_layers * m, batch, lstm_hidden_size),
                    torch.zeros(lstm_num_layers * m, batch, lstm_hidden_size))

        def forward(self, input, seq_len, batch_size_fwd, stateful_fwd):
            x = self.cnn_layer_1(input)
            x = nn.functional.leaky_relu(x)
            x = self.cnn_layer_2(x)
            x = nn.functional.leaky_relu(x)
            x = self.cnn_layer_3(x)
            x = nn.functional.leaky_relu(x)
            x = x.view(x.size(0), -1)  # flatten the conv features of each frame
            x = self.FC(x)
            x = x.view(batch_size_fwd, seq_len, -1)  # reshape to (batch, seq_len, input_size) for the LSTM

            if stateful_fwd:
                # Stateful mode: the input is (batch, seq_len, input_size) since batch_first=True;
                # the output is (batch, seq_len, num_directions * hidden_size), i.e. the hidden state
                # at every time step.
                output, (hn, cn) = self.LSTM_1(x, (self.hidden_state, self.cell_state))
                self.hidden_state = hn  # keep the last hidden state as the initial hidden state of the next call
                self.cell_state = cn    # keep the last cell state as the initial cell state of the next call
            else:
                output, (hn, cn) = self.LSTM_1(x)
            '''
            if self.bidirectional == True:
                output = output.view(input.shape[0], input.shape[1], 2, Lstate).permute(0,1,3,2)
                output = self.bi_layer(output).view(input.shape[0],input.shape[1],-1)
            '''
            x = self.logprob(self.Linear_out(output))
            return x

    model = pred_model(False)
    return model


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='provide arguments')

    # simulation stuff
    parser.add_argument('--save_model', help='bool to indicate whether to save the NN model', default=1)
    parser.add_argument('--log_en', help='bool to save a log of the script in the working folder', default=1)
    parser.add_argument('--plot_show', help='bool to indicate whether to show figures at the end of the run', default=0)
    parser.add_argument('--Nlabels', help='number of labels used from the data set', default=11)

    # model params
    parser.add_argument('--Cin', help='input channels', default=1)
    parser.add_argument('--Cout1', help='channels, conv layer 1', default=5)
    parser.add_argument('--Cout2', help='channels, conv layer 2', default=2)
    parser.add_argument('--Cout3', help='channels, conv layer 3', default=2)
    parser.add_argument('--Lin_lstm', help='input size to LSTM after the FC layer', default=50)
    parser.add_argument('--lstm_hidden_size', help='LSTM hidden size', default=50)
    parser.add_argument('--lstm_num_layers', help='number of LSTM layers', default=1)
    parser.add_argument('--batch_size', help='default batch size', default=1)

    args = vars(parser.parse_args())
    pp.pprint(args)
    a = model_builder(args)
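
As a quick shape sanity check, here is a hypothetical smoke test (not part of the commit). It assumes the file above is saved as model.py (the file name is not shown in this rendering) and uses Cout3=8, since the hard-coded Lin_fc=6272 corresponds to 8 channels of 28x28 conv output for a 32x32 input; the other values mirror the parser defaults.

# Hypothetical smoke test: one non-stateful forward pass on random 32x32 frames.
import torch
from model import model_builder  # assumed module name

args = {'Cin': 1, 'Cout1': 5, 'Cout2': 2, 'Cout3': 8, 'Lin_lstm': 50,
        'lstm_hidden_size': 50, 'lstm_num_layers': 1, 'batch_size': 1, 'Nlabels': 11}
model = model_builder(args)

batch, seq_len = 1, 40
frames = torch.randn(batch * seq_len, 1, 32, 32)           # conv input: (batch*seq_len, Cin, 32, 32)
logp = model(frames, seq_len, batch, stateful_fwd=False)   # non-stateful forward pass
print(logp.shape)                                          # torch.Size([1, 40, 11]) log-probabilities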
@@ -0,0 +1,87 @@
# Demo code to extract the data in Python
import h5py
import os
import json
import argparse
import torch
import tools
import numpy as np


def load_data(args):
    reshape_flag = int(args['reshape_flag'])
    zero_pad = int(args['zero_pad'])

    use_channel = 0
    f = open(os.path.join('config', 'file_half.json'))
    idx = json.load(f)

    data_train = []
    label_train = []
    data_vld = []
    label_vld = []

    for d in idx['train']:
        q = h5py.File(os.path.join('dsp', d + '.h5'), 'r')
        temp = torch.from_numpy(q['ch{}'.format(use_channel)][()])  # [seq_len, 1024]
        if zero_pad == 1:
            temp = torch.cat((temp, torch.zeros(130 - temp.shape[0], 1024)))  # max sequence length is 130
        elif zero_pad > 1:
            if temp.shape[0] >= zero_pad:
                temp = temp[0:zero_pad, :]  # truncate to length zero_pad
                label_train.append(torch.from_numpy(q['label'][0:zero_pad, 0]).long())
            else:
                temp = torch.cat((temp, torch.zeros(zero_pad - temp.shape[0], 1024)))  # pad to length zero_pad
                label_train.append(torch.from_numpy(q['label'][:, 0]).long())
        if reshape_flag == 0:
            data_train.append(temp)
        else:
            data_train.append(temp.reshape(-1, 32, 32))

    # drop sequences with fewer than 40 samples - 33 in the original training set
    seq_lens = np.asarray([lbl.shape[0] for lbl in label_train])
    idx_to_drop = np.where(seq_lens < 40)[0]
    label_train = [i for j, i in enumerate(label_train) if j not in idx_to_drop]
    data_train = [i for j, i in enumerate(data_train) if j not in idx_to_drop]

    labels_tr_l = torch.LongTensor(tools.get_last_element(label_train, len(label_train)))

    for d in idx['eval']:
        q = h5py.File(os.path.join('dsp', d + '.h5'), 'r')
        temp = torch.from_numpy(q['ch{}'.format(use_channel)][()])  # [seq_len, 1024]
        if zero_pad == 1:
            temp = torch.cat((temp, torch.zeros(145 - temp.shape[0], 1024)))  # max sequence length is 145
        elif zero_pad > 1:
            if temp.shape[0] >= zero_pad:
                temp = temp[0:zero_pad, :]  # truncate to length zero_pad
                label_vld.append(torch.from_numpy(q['label'][0:zero_pad, 0]).long())
            else:
                temp = torch.cat((temp, torch.zeros(zero_pad - temp.shape[0], 1024)))  # pad to length zero_pad
                label_vld.append(torch.from_numpy(q['label'][:, 0]).long())
        if reshape_flag == 0:
            data_vld.append(temp)
        else:
            data_vld.append(temp.reshape(-1, 32, 32))

    # drop sequences with fewer than 40 samples - 23 sequences in the original eval set
    seq_lens = np.asarray([lbl.shape[0] for lbl in label_vld])
    idx_to_drop = np.where(seq_lens < 40)[0]
    label_vld = [i for j, i in enumerate(label_vld) if j not in idx_to_drop]
    data_vld = [i for j, i in enumerate(data_vld) if j not in idx_to_drop]

    labels_vl_l = torch.LongTensor(tools.get_last_element(label_vld, len(label_vld)))
    return data_train, label_train, labels_tr_l, data_vld, label_vld, labels_vl_l


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='provide arguments')
    # create the ArgumentParser object
    parser.add_argument('--reshape_flag', help='reshape data to 32x32 matrices', default=1)
    parser.add_argument('--zero_pad', help='if 1, zero-pad train & validation sequences to the max length; if an int > 1, pad/truncate to that length', default=40)
    # add the arguments via add_argument()
    args = vars(parser.parse_args())
    # parse_args() parses the added arguments; vars() returns the parsed namespace as a dict
    load_data(args)
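
For orientation, a hypothetical sketch of how the returned lists could be stacked for the CNN-LSTM model. It assumes the file above is saved as data_loader.py (the file name is not shown in this rendering), that the repo's dsp/*.h5 files and config/file_half.json are available, and that the defaults reshape_flag=1, zero_pad=40 are used, so every kept sequence ends up as a (40, 32, 32) tensor with a 40-step label vector.

# Hypothetical downstream usage; requires the repo's data files.
import torch
from data_loader import load_data  # assumed module name

args = {'reshape_flag': 1, 'zero_pad': 40}
data_train, label_train, labels_tr_l, data_vld, label_vld, labels_vl_l = load_data(args)

# After the <40-sample filter, every kept sequence has exactly 40 frames, so the lists stack cleanly.
x_train = torch.stack(data_train)    # (N_train, 40, 32, 32) radar frames
y_train = torch.stack(label_train)   # (N_train, 40) per-frame gesture labels
print(x_train.shape, y_train.shape, labels_tr_l.shape)  # labels_tr_l: (N_train,) last-frame labels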
@@ -0,0 +1,2 @@
Deep Soli - PyTorch implementation
https://github.com/sholevs66/Deep-Soli---Hand-Gesture-recognition.git
@@ -0,0 +1,28 @@
import torch


def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]


def shuffle_data(x, y, y_l, batch_size):
    """Shuffle the samples and split them into batch-sized chunks."""
    idx_train_shuffle = torch.randperm(x.shape[0])
    x = x[idx_train_shuffle, :]
    x_ch = torch.split(x, batch_size, dim=0)
    x_ch = x_ch[0:-1]  # drop the last chunk (it may be incomplete)
    y = y[idx_train_shuffle, :]
    y_l = y_l[idx_train_shuffle]
    y_ch = torch.split(y, batch_size, dim=0)
    y_ch = y_ch[0:-1]

    return x, y, x_ch, y_ch, y_l


def calc_error(y_pred, y_true):
    """Return the fraction of entries where y_pred differs from y_true."""
    return (torch.count_nonzero(y_pred - y_true) / y_pred.shape[0]).cpu().data.numpy()


def get_last_element(y, N):
    """Return the last element of each of the first N sequences in y."""
    return [y[i][-1] for i in range(N)]
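
A small, self-contained illustration of the helpers above, using dummy tensors (the shapes and values are made up for the example):

# Illustrative usage of tools.py with dummy data.
import torch
import tools

x = torch.randn(10, 40, 32, 32)      # 10 sequences of 40 frames
y = torch.randint(0, 11, (10, 40))   # per-frame labels in [0, 11)
y_l = y[:, -1]                       # last-frame label of each sequence

x, y, x_ch, y_ch, y_l = tools.shuffle_data(x, y, y_l, batch_size=4)
print(len(x_ch), x_ch[0].shape)      # 2 batches of shape (4, 40, 32, 32); the leftover chunk of 2 is dropped

y_pred = torch.randint(0, 11, (4,))  # stand-in predictions for one batch
print(tools.calc_error(y_pred, y_l[0:4]))     # fraction of mismatched labels
print(list(tools.chunks(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]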