From ce0e28abfefe80f27c09cca39bf7b0456152fbb6 Mon Sep 17 00:00:00 2001
From: sjyan
Date: Sun, 24 May 2020 21:55:34 +0800
Subject: [PATCH] fix issue #304

---
 .../st_gcn_aaai18/kinetics-skeleton/test.yaml |  4 +--
 .../st_gcn_aaai18/ntu-rgbd-xsub/test.yaml     |  4 +--
 .../st_gcn_aaai18/ntu-rgbd-xview/test.yaml    |  4 +--
 doc/GETTING_STARTED.md                        |  5 +--
 mmskeleton/models/backbones/st_gcn_aaai18.py  | 31 ++++++++++---------
 mmskeleton/processor/recognition.py           |  5 +++
 6 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/configs/recognition/st_gcn_aaai18/kinetics-skeleton/test.yaml b/configs/recognition/st_gcn_aaai18/kinetics-skeleton/test.yaml
index 534de1f15..e84435019 100644
--- a/configs/recognition/st_gcn_aaai18/kinetics-skeleton/test.yaml
+++ b/configs/recognition/st_gcn_aaai18/kinetics-skeleton/test.yaml
@@ -31,5 +31,5 @@ processor_cfg:
   #   debug: true

   # dataloader setting
-  batch_size: 256
-  gpus: 4
+  batch_size: 64
+  gpus: 1
diff --git a/configs/recognition/st_gcn_aaai18/ntu-rgbd-xsub/test.yaml b/configs/recognition/st_gcn_aaai18/ntu-rgbd-xsub/test.yaml
index 6381b8c5a..b3a9d410e 100644
--- a/configs/recognition/st_gcn_aaai18/ntu-rgbd-xsub/test.yaml
+++ b/configs/recognition/st_gcn_aaai18/ntu-rgbd-xsub/test.yaml
@@ -32,5 +32,5 @@ processor_cfg:
   #   debug: true

   # dataloader setting
-  batch_size: 256
-  gpus: 4
+  batch_size: 64
+  gpus: 1
diff --git a/configs/recognition/st_gcn_aaai18/ntu-rgbd-xview/test.yaml b/configs/recognition/st_gcn_aaai18/ntu-rgbd-xview/test.yaml
index e172c0be8..87d45779f 100644
--- a/configs/recognition/st_gcn_aaai18/ntu-rgbd-xview/test.yaml
+++ b/configs/recognition/st_gcn_aaai18/ntu-rgbd-xview/test.yaml
@@ -32,5 +32,5 @@ processor_cfg:
   #   debug: true

   # dataloader setting
-  batch_size: 256
-  gpus: 4
+  batch_size: 64
+  gpus: 1
diff --git a/doc/GETTING_STARTED.md b/doc/GETTING_STARTED.md
index fb12ac9de..68a6b866c 100644
--- a/doc/GETTING_STARTED.md
+++ b/doc/GETTING_STARTED.md
@@ -9,10 +9,11 @@ conda create -n open-mmlab python=3.7 -y
 conda activate open-mmlab
 ```

-b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g.,
+b. Install PyTorch and torchvision:

 ``` shell
-conda install pytorch torchvision -c pytorch
+conda install pytorch==1.2.0 torchvision==0.4.0 -c pytorch
 ```
+The higher versions are not covered by tests.

 c. Clone mmskeleton from github:
diff --git a/mmskeleton/models/backbones/st_gcn_aaai18.py b/mmskeleton/models/backbones/st_gcn_aaai18.py
index ec101c67a..0fe0bc28b 100644
--- a/mmskeleton/models/backbones/st_gcn_aaai18.py
+++ b/mmskeleton/models/backbones/st_gcn_aaai18.py
@@ -25,7 +25,6 @@ class ST_GCN_18(nn.Module):
             :math:`V_{in}` is the number of graph nodes,
             :math:`M_{in}` is the number of instance in a frame.
     """
-
     def __init__(self,
                  in_channels,
                  num_class,
@@ -37,20 +36,25 @@ def __init__(self,

         # load graph
         self.graph = Graph(**graph_cfg)
-        A = torch.tensor(
-            self.graph.A, dtype=torch.float32, requires_grad=False)
+        A = torch.tensor(self.graph.A,
+                         dtype=torch.float32,
+                         requires_grad=False)
         self.register_buffer('A', A)

         # build networks
         spatial_kernel_size = A.size(0)
         temporal_kernel_size = 9
         kernel_size = (temporal_kernel_size, spatial_kernel_size)
-        self.data_bn = nn.BatchNorm1d(
-            in_channels * A.size(1)) if data_bn else lambda x: x
+        self.data_bn = nn.BatchNorm1d(in_channels *
+                                      A.size(1)) if data_bn else lambda x: x
         kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
         self.st_gcn_networks = nn.ModuleList((
-            st_gcn_block(
-                in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
+            st_gcn_block(in_channels,
+                         64,
+                         kernel_size,
+                         1,
+                         residual=False,
+                         **kwargs0),
             st_gcn_block(64, 64, kernel_size, 1, **kwargs),
             st_gcn_block(64, 64, kernel_size, 1, **kwargs),
             st_gcn_block(64, 64, kernel_size, 1, **kwargs),
@@ -75,7 +79,6 @@ def __init__(self,
         self.fcn = nn.Conv2d(256, num_class, kernel_size=1)

     def forward(self, x):
-
         # data normalization
         N, C, T, V, M = x.size()
         x = x.permute(0, 4, 3, 1, 2).contiguous()
@@ -85,7 +88,7 @@ def forward(self, x):
         x = x.permute(0, 1, 3, 4, 2).contiguous()
         x = x.view(N * M, C, T, V)

-        # forwad
+        # forward
         for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
             x, _ = gcn(x, self.A * importance)

@@ -148,7 +151,6 @@ class st_gcn_block(nn.Module):
             :math:`V` is the number of graph nodes.

     """
-
     def __init__(self,
                  in_channels,
                  out_channels,
@@ -187,11 +189,10 @@ def __init__(self,

         else:
             self.residual = nn.Sequential(
-                nn.Conv2d(
-                    in_channels,
-                    out_channels,
-                    kernel_size=1,
-                    stride=(stride, 1)),
+                nn.Conv2d(in_channels,
+                          out_channels,
+                          kernel_size=1,
+                          stride=(stride, 1)),
                 nn.BatchNorm2d(out_channels),
             )
diff --git a/mmskeleton/processor/recognition.py b/mmskeleton/processor/recognition.py
index 983ee7875..e489f070a 100644
--- a/mmskeleton/processor/recognition.py
+++ b/mmskeleton/processor/recognition.py
@@ -30,7 +30,9 @@ def test(model_cfg, dataset_cfg, checkpoint, batch_size=64, gpus=1, workers=4):
     prog_bar = ProgressBar(len(dataset))
     for data, label in data_loader:
         with torch.no_grad():
+            output = model(data)
             output = model(data).data.cpu().numpy()
+
         results.append(output)
         labels.append(label)
         for i in range(len(data)):
@@ -77,7 +79,9 @@ def train(
     else:
         model = call_obj(**model_cfg)
     model.apply(weights_init)
+    print(111, len(model.edge_importance))
     model = MMDataParallel(model, device_ids=range(gpus)).cuda()
+    print(222, len(model.module.edge_importance))
     loss = call_obj(**loss_cfg)

     # build runner
@@ -92,6 +96,7 @@
     # run
     workflow = [tuple(w) for w in workflow]
+    print(222, len(model.module.edge_importance))
     runner.run(data_loaders, workflow, total_epochs, loss=loss)