Commit 0c648b4: 2-hop edge between time serial
yongqyu committed Feb 20, 2019
1 parent a9273b4 commit 0c648b4
Showing 5 changed files with 95 additions and 32 deletions.
8 changes: 5 additions & 3 deletions config.py
@@ -13,14 +13,16 @@ def get_args():
    parser.add_argument('--model_path', type=str, default='./models')

    parser.add_argument('--batch_size', type=int, default=1)
-   parser.add_argument('--learning_rate',type=int, default=0.1)
-   parser.add_argument('--dropout_rate',type=int, default=0.5)
+   parser.add_argument('--learning_rate',type=int, default=0.001)
+   parser.add_argument('--beta1',type=int, default=0.5)
+   parser.add_argument('--beta2',type=int, default=0.99)
+   parser.add_argument('--dropout_rate',type=int, default=0.0)
    parser.add_argument('--weight_decay',type=int, default=0.0)
-   parser.add_argument('--val_step',type=int, default=3)

    parser.add_argument('--num_epochs',type=int, default=30)
    parser.add_argument('--start_epoch',type=int, default=0)
    parser.add_argument('--test_epoch',type=int, default=30)
+   parser.add_argument('--val_step',type=int, default=2)

    parser.add_argument('--num_classes',type=int, default=9)

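A note on the flags above: learning_rate, beta1, beta2, dropout_rate, and weight_decay carry float defaults but are declared with type=int. argparse applies type only to values supplied on the command line, so the defaults still work, but an override such as --learning_rate 0.0005 would fail with "invalid int value". A corrected sketch, not part of this commit:

# Sketch only: float-valued hyperparameters declared with type=float
# so that command-line overrides parse correctly.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--beta2', type=float, default=0.99)
parser.add_argument('--dropout_rate', type=float, default=0.0)
parser.add_argument('--weight_decay', type=float, default=0.0)

args = parser.parse_args(['--learning_rate', '0.0005'])
print(args.learning_rate)  # 0.0005, parsed as a float rather than rejected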
39 changes: 29 additions & 10 deletions layer.py
@@ -3,12 +3,13 @@
import torch.nn.functional as F

-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = 'cpu'

class GraphConvolution(nn.Module):
    def __init__(self, input_dim, output_dim, num_vetex, act=F.relu, dropout=0.5, bias=True):
        super(GraphConvolution, self).__init__()

+       self.alpha = 0.8

        self.act = act
        self.dropout = nn.Dropout(dropout)
        self.weight = nn.Parameter(torch.randn(input_dim, output_dim)).to(device)
@@ -22,8 +23,6 @@ def __init__(self, input_dim, output_dim, num_vetex, act=F.relu, dropout=0.5, bias=True):

    def normalize(self, m):
        rowsum = torch.sum(m, 0)
-       r_mat = torch.diag(rowsum)

        r_inv = torch.pow(rowsum, -0.5)
        r_mat_inv = torch.diag(r_inv).float()

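The removed r_mat was computed but never used, so dropping it is a pure cleanup. These lines are the start of the symmetric normalization D^(-1/2) A D^(-1/2) standard in graph convolutions; the tail of the method is folded out of this diff, so the return in the sketch below is an assumption consistent with the visible lines:

# Hedged sketch of the full normalize(); the method's tail is not shown
# in the diff, so the final mm chain is assumed.
import torch

def normalize(m):
    rowsum = torch.sum(m, 0)
    r_inv = torch.pow(rowsum, -0.5)
    r_mat_inv = torch.diag(r_inv).float()
    return torch.mm(torch.mm(r_mat_inv, m), r_mat_inv)

A = torch.ones(2, 2)   # tiny graph: 2 nodes, self-loops included
print(normalize(A))    # every entry 0.5, i.e. 1/sqrt(2) * 1 * 1/sqrt(2)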
@@ -34,10 +33,15 @@ def normalize(self, m):

    def forward(self, adj, x):

+       x = self.dropout(x)
+
+       # K-ordered Chebyshev polynomial
        adj_norm = self.normalize(adj)
+       sqr_norm = self.normalize(torch.mm(adj,adj))
+       m_norm = self.alpha*adj_norm + (1.-self.alpha)*sqr_norm

        x_tmp = torch.einsum('abcd,de->abce', x, self.weight)
-       x_out = torch.einsum('ij,abid->abjd', adj_norm, x_tmp)
+       x_out = torch.einsum('ij,abid->abjd', m_norm, x_tmp)
        if self.bias is not None:
            x_out += self.bias

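This hunk is the change the commit title describes: propagation now mixes the normalized 1-hop adjacency with the normalized 2-hop adjacency A·A, weighted alpha = 0.8 versus 0.2, in the spirit of a truncated Chebyshev expansion. A standalone sketch of the mixing step; the shapes are hypothetical small numbers inferred from the einsum subscripts (batch x time x vertex x feature), and normalize is the helper sketched above:

# Shape-level sketch of the 2-hop mixing; V, the time length, and the
# feature sizes are made-up values for illustration.
import torch

alpha = 0.8
V, D_in, D_out = 4, 3, 8
adj = torch.rand(V, V) + torch.eye(V)
x = torch.rand(1, 10, V, D_in)                 # batch x time x vertex x feature
weight = torch.randn(D_in, D_out)

adj_norm = normalize(adj)                      # 1-hop neighbours
sqr_norm = normalize(torch.mm(adj, adj))       # 2-hop neighbours via A @ A
m_norm = alpha*adj_norm + (1. - alpha)*sqr_norm

x_tmp = torch.einsum('abcd,de->abce', x, weight)      # per-vertex feature transform
x_out = torch.einsum('ij,abid->abjd', m_norm, x_tmp)  # mix features across vertices
print(x_out.shape)                             # torch.Size([1, 10, 4, 8])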
@@ -47,30 +51,45 @@ def forward(self, adj, x):


class StandConvolution(nn.Module):
-   def __init__(self, dims, num_classes):
+   def __init__(self, dims, num_classes, dropout):
        super(StandConvolution, self).__init__()

-       print('input_dim, output_dim')
+       self.dropout = nn.Dropout(dropout)
        self.conv = nn.Sequential(
            nn.Conv2d(dims[0], dims[1], kernel_size=3),
-           nn.BatchNorm2d(dims[1]),
+           nn.InstanceNorm2d(dims[1]),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(3, stride=2),
            nn.Conv2d(dims[1], dims[2], kernel_size=3),
-           nn.BatchNorm2d(dims[2]),
+           nn.InstanceNorm2d(dims[2]),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(3, stride=2),
            nn.Conv2d(dims[2], dims[3], kernel_size=3),
-           nn.BatchNorm2d(dims[3]),
+           nn.InstanceNorm2d(dims[3]),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(3, stride=2)
        ).to(device)

        self.fc = nn.Linear(dims[3]*3, num_classes).to(device)

    def forward(self, x):
-       x = x.permute(0,3,1,2)
+       x = self.dropout(x.permute(0,3,1,2))
        x_tmp = self.conv(x)
        x_out = self.fc(x_tmp.view(x.size(0), -1))

        return x_out


+class StandRecurrent(nn.Module):
+   def __init__(self, dims, num_classes, dropout):
+       super(StandRecurrent, self).__init__()
+
+       self.lstm = nn.LSTM(dims[0]*45, dims[1], batch_first=True,
+                           dropout=0).to(device)
+       self.fc = nn.Linear(dims[1], num_classes).to(device)
+
+   def forward(self, x):
+       x_tmp,_ = self.lstm(x.contiguous().view(x.size(0), x.size(1), -1))
+       x_out = self.fc(x_tmp[:,-1])
+
+       return x_out
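Two details in this file are easy to miss. First, the BatchNorm2d to InstanceNorm2d swap matters because config.py sets batch_size to 1: batch statistics over a single sample are degenerate, while InstanceNorm normalizes each sample's channels on their own. Second, StandRecurrent is a new LSTM head over flattened per-frame features. A hedged usage sketch; the 45 matches the 45 vertices produced by model.py's temporal concatenation, and the shapes are assumptions:

# Assumed driving code for StandRecurrent; not part of the commit.
import torch

rec = StandRecurrent(dims=[3, 64], num_classes=9, dropout=0.5)
x = torch.rand(1, 30, 45, 3)   # batch x time x vertex x coordinate
logit = rec(x)                 # LSTM over time, classify the last hidden state
print(logit.shape)             # torch.Size([1, 9])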
53 changes: 46 additions & 7 deletions main.py
@@ -5,11 +5,11 @@
import torch.utils.data as data

from model import *
+from metric import accuracy
from config import get_args
args = get_args()

-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = 'cpu'

train_tensor, train_label = torch.load(args.train_path)
valid_tensor, valid_label = torch.load(args.valid_path)
@@ -41,7 +41,7 @@
A = torch.from_numpy(np.asarray(A)).to(device)

model = GGCN(A, train_tensor.size(3), args.num_classes,
-            [train_tensor.size(3), train_tensor.size(3)*3], [train_tensor.size(3)*3, 16, 32, 64])
+            [train_tensor.size(3), train_tensor.size(3)*3], [train_tensor.size(3)*3, 16, 32, 64], args.dropout_rate)
if device == 'cuda':
    model.cuda()

@@ -52,16 +52,24 @@
print('The number of parameters: {}'.format(num_params))

criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr = args.learning_rate, weight_decay = args.weight_decay)
+optimizer = optim.Adam(model.parameters(), lr = args.learning_rate,
+                       betas=[args.beta1, args.beta2], weight_decay = args.weight_decay)
+scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma = 0.1)

+best_epoch = 0
+best_acc = 0
def train():
+   global best_epoch, best_acc

    if args.start_epoch:
        model.load_state_dict(torch.load(os.path.join(args.model_path,
-                             'model-%d.pkl'%(args.start_epoch))).state_dict())
+                             'model-%d.pkl'%(args.start_epoch))))

    # Training
    for epoch in range(args.start_epoch, args.num_epochs):
+       train_loss = 0
+       train_acc = 0
+       scheduler.step()
        model.train()
        for i, x in enumerate(train_loader):
            logit = model(x[0].float())
@@ -73,6 +81,11 @@ def train():
            loss.backward()
            optimizer.step()

+           train_loss += loss.item()
+           train_acc += accuracy(logit, target.view(1))
+
+       print('[epoch',epoch+1,'] Train loss:',train_loss/i, 'Train Acc:',train_acc/i)

        if (epoch+1) % args.val_step == 0:
            model.eval()
            val_loss = 0
@@ -82,15 +95,41 @@
                logit = model(x[0].float())
                target = valid_label[i]

-               val_loss += criterion(logit, target.view(1))
+               val_loss += criterion(logit, target.view(1)).item()
+               val_acc += accuracy(logit, target.view(1))

+           if best_acc < (val_acc/i):
+               best_epoch = epoch+1
+               best_acc = (val_acc/i)
+               torch.save(model.state_dict(), os.path.join(args.model_path, 'model-%d.pkl'%(best_epoch)))

+           print('Val loss:',val_loss/i, 'Val Acc:',val_acc/i)

+def test():
+   global best_epoch
+
+   model.load_state_dict(torch.load(os.path.join(args.model_path,
+                         'model-%d.pkl'%(best_epoch))))
+   print("load model from 'model-%d.pkl'"%(best_epoch))
+
+   model.eval()
+   test_loss = 0
+   test_acc = 0
+   with torch.no_grad():
+       for i, x in enumerate(test_loader):
+           logit = model(x[0].float())
+           #print(F.softmax(logit, 1).cpu().numpy(), torch.max(logit, 1)[1].float().cpu().numpy())
+           target = test_label[i]

-           print(val_loss)
+           test_loss += criterion(logit, target.view(1)).item()
+           test_acc += accuracy(logit, target.view(1))
+
+   print('Test loss:',test_loss/i, 'Test Acc:',test_acc/i)

if __name__ == '__main__':
    if args.mode == 'train':
        train()
    elif args.mode == 'test':
-       best_epoch = args.test_epcoh
+       best_epoch = args.test_epoch
        test()

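main.py now imports accuracy from metric, a file not included in this commit. Given the call sites, accuracy(logit, target.view(1)) summed per sample and divided by the iteration count, a minimal implementation consistent with that usage could look like the following; this is an assumption about metric.py, not the repository's actual code:

# Hypothetical metric.py; the real file is not shown in this diff.
import torch

def accuracy(logit, target):
    # fraction of rows whose argmax matches the target label
    pred = torch.max(logit, 1)[1]
    return (pred == target).float().mean().item()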
8 changes: 4 additions & 4 deletions model.py
@@ -7,21 +7,21 @@
from layer import GraphConvolution, StandConvolution

class GGCN(nn.Module):
-   def __init__(self, adj, num_v, num_classes, gc_dims, sc_dims):
+   def __init__(self, adj, num_v, num_classes, gc_dims, sc_dims, dropout=0.5):
        super(GGCN, self).__init__()

        adj = adj + torch.eye(adj.size(0)).to(adj).detach()
        ident = torch.eye(adj.size(0)).to(adj)
        zeros = torch.zeros(adj.size(0), adj.size(1)).to(adj)
        self.adj = torch.cat([torch.cat([adj, ident, zeros], 1),
                              torch.cat([ident, adj, ident], 1),
                              torch.cat([zeros, ident, adj], 1)], 0).float()

-       self.gcl = GraphConvolution(gc_dims[0], gc_dims[1], num_v)
-       self.conv = StandConvolution(sc_dims, num_classes)
+       self.gcl = GraphConvolution(gc_dims[0], gc_dims[1], num_v, dropout=dropout)
+       self.conv = StandConvolution(sc_dims, num_classes, dropout=dropout)

    def forward(self, x):
        concat_seq = torch.cat([x[:,:-2], x[:,1:-1], x[:,2:]], 2) # 1, 30, 45, 3

        multi_conv = self.gcl(self.adj, concat_seq)
        logit = self.conv(multi_conv)

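The adjacency block here is where the temporal edges live: three copies of the self-loop-augmented skeleton graph sit on the diagonal, and the identity blocks one step off the diagonal connect each joint to itself in the neighbouring frame, with the middle frame linked to both. forward matches this by stacking each frame with its two successors, so a clip of T frames becomes T-2 windows over 3N vertices. A shape walk-through, assuming the 15-joint Florence skeleton:

# Shape check for the 3-frame block adjacency; N=15 is an assumption
# based on preprocess.py's frames.view(32, -1, 3).
import torch

N = 15
adj = torch.rand(N, N)
ident = torch.eye(N)
zeros = torch.zeros(N, N)
big_adj = torch.cat([torch.cat([adj, ident, zeros], 1),
                     torch.cat([ident, adj, ident], 1),
                     torch.cat([zeros, ident, adj], 1)], 0)
print(big_adj.shape)   # torch.Size([45, 45])

x = torch.rand(1, 32, N, 3)                                # batch x frame x joint x xyz
concat_seq = torch.cat([x[:,:-2], x[:,1:-1], x[:,2:]], 2)  # 3-frame windows
print(concat_seq.shape)                                    # torch.Size([1, 30, 45, 3])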
19 changes: 11 additions & 8 deletions preprocess.py
@@ -6,7 +6,7 @@


file_name = './dataset/Florence_3d_actions/Florence_dataset_WorldCoordinates.txt'
-f =open(file_name)
+f = open(file_name)
lines = f.readlines()
prev_video = int(lines[0][0])
prev_categ = int(lines[0][2])
@@ -43,20 +43,20 @@
        new_datas.append(np.interp(new_xloc, xloc, data))
    frames = torch.from_numpy(np.stack(new_datas, 0)).t()

    frames = frames.view(32, -1, 3)
-   if aid <= 8:
+   if prev_actor < 9:
        train.append(frames)
        train_label.append(prev_categ)
-   elif aid == 9:
+   elif prev_actor < 10:
        valid.append(frames)
        valid_label.append(prev_categ)
    else:
        test.append(frames)
        test_label.append(prev_categ)
    frames = [np.reshape(np.asarray(features), (-1,3))]
-   prev_video = vid
-   prev_categ = cid
+   prev_actor = aid
+   prev_video = vid
+   prev_categ = cid


if len(frames) >= 32:
@@ -75,12 +75,15 @@


frames = frames.view(32, -1, 3)
-if aid <= 8:
+if aid < 9:
    train.append(frames)
-elif aid == 9:
+   train_label.append(prev_categ)
+elif aid < 10:
    valid.append(frames)
+   valid_label.append(prev_categ)
else:
    test.append(frames)
+   test_label.append(prev_categ)

train_label = torch.from_numpy(np.asarray(train_label))
valid_label = torch.from_numpy(np.asarray(valid_label))
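The rewritten conditions make the actor-based split explicit and fix a subtle boundary bug: inside the reading loop, a finished clip is flushed only after the next line has been read, so the split must key on prev_actor rather than on the aid of the line that triggered the flush; the final flush after the loop still uses aid because there is no next line. Actors 1 to 8 go to train, 9 to validation, 10 to test, and labels are now appended in the final flush too. The pattern, in miniature:

# Minimal sketch of the flush-on-boundary pattern the hunk fixes.
def flush(clip, actor, categ, train, valid, test):
    # 'actor' must belong to the finished clip: prev_actor inside the
    # loop, aid only in the final flush after the loop ends.
    if actor < 9:
        train.append((clip, categ))
    elif actor < 10:
        valid.append((clip, categ))
    else:
        test.append((clip, categ))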
