Commit: init commit
a.parkin committed Mar 9, 2019
1 parent 04e3928 commit 0a10ccd
Showing 159 changed files with 3,015,518 additions and 0 deletions.
57 changes: 57 additions & 0 deletions README.md
@@ -0,0 +1,57 @@
# Solution for ChaLearn Face Anti-spoofing Attack Detection Challenge @ CVPR2019 by a.parkin (VisionLabs)

The solution uses DLAS (Deep Layers Aggregation Solution) architecture models, one for each of the three input sources (RGB, Depth, IR).
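The exact DLAS models are defined in the repository code; purely as an illustration of the multi-stream idea, a network with one backbone per modality and feature-level aggregation could look like the sketch below (the `make_backbone` factory, the feature size, and concatenation fusion are assumptions, not the actual architecture).

```python
import torch
import torch.nn as nn

class ThreeStreamSketch(nn.Module):
    """Illustrative only: one feature extractor per modality, fused by concatenation."""

    def __init__(self, make_backbone, feat_dim=512, num_classes=2):
        super().__init__()
        # make_backbone is a hypothetical factory returning a module that maps
        # an image batch to a (batch, feat_dim) feature tensor.
        self.rgb_net = make_backbone()
        self.depth_net = make_backbone()
        self.ir_net = make_backbone()
        self.classifier = nn.Linear(3 * feat_dim, num_classes)

    def forward(self, rgb, depth, ir):
        feats = torch.cat([self.rgb_net(rgb),
                           self.depth_net(depth),
                           self.ir_net(ir)], dim=1)
        return self.classifier(feats)
```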

## Picture of the architecture

Create the conda environment and install the required libraries:

```
conda create --name python3 --file spec-file.txt;
conda activate python3;
pip install -r requirements.txt
```


## Train
Pretrained models for face or gender recognition were used:

|Exp. Name|Model architecture|Train description|Link|Google Drive|
|:---:|:------------:|:-------------:|:--------:|:---------:|
|exp1_2stage|resnet caffe34|CASIA, sphere loss|[MCS2018](https://github.com/AlexanderParkin/MCS2018.Baseline)|link|
|exp2|resnet caffe34|Gender classifier on pretrained weights|./attributes_trainer|link|
|exp3b|IR50|MSCeleb, arcface|[face.evoLVe.PyTorch](https://github.com/ZhaoJ9014/face.evoLVe.PyTorch#Model-Zoo)|link|
|exp3c|IR50|Asia (private) dataset, arcface|[face.evoLVe.PyTorch](https://github.com/ZhaoJ9014/face.evoLVe.PyTorch#Model-Zoo)|link|
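
How these checkpoints are consumed is defined by the training code of each experiment; as a rough, hedged illustration, partially loading a downloaded backbone checkpoint before fine-tuning might look like this (the `load_pretrained` helper, the `state_dict` wrapping, and `strict=False` are assumptions):

```python
import torch
import torch.nn as nn

def load_pretrained(backbone: nn.Module, ckpt_path: str) -> nn.Module:
    """Hypothetical helper: copy matching weights, skip heads that do not match."""
    state = torch.load(ckpt_path, map_location='cpu')
    if isinstance(state, dict) and 'state_dict' in state:
        state = state['state_dict']          # some checkpoints wrap the weights
    missing, unexpected = backbone.load_state_dict(state, strict=False)
    print('missing keys:', len(missing), '| unexpected keys:', len(unexpected))
    return backbone
```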


### Step 1 (can be skipped)
Download all pretrained models (the exp1_2stage, exp3b, and exp3c links) and the challenge train/val/test data.

### Step 2 (can be skipped)
Download AFAD-Lite and train a model for the gender recognition task (see `attributes_trainer/README.md`).
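
The gender labels come from the AFAD-Lite folder structure. As a hedged sketch (assuming the usual AFAD layout of `<age>/<gender-id>` folders, with `111` for male and `112` for female), a `path,label` list in the format the `attributes_trainer` dataset reads could be built like this:

```python
import glob
import os
import pandas as pd

def build_afad_gender_list(afad_root, out_csv):
    # Assumed layout: <afad_root>/<age>/<111|112>/<image>.jpg; verify against the unpacked data.
    rows = []
    for path in glob.glob(os.path.join(afad_root, '*', '*', '*.jpg')):
        gender_dir = os.path.basename(os.path.dirname(path))
        label = 0 if gender_dir == '111' else 1   # assumed mapping: 0 = male, 1 = female
        rows.append({'path': os.path.relpath(path, afad_root), 'label': label})
    pd.DataFrame(rows).to_csv(out_csv, index=False)

# e.g. build_afad_gender_list('data/afad-lite', 'lists/train_list.txt')
```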

### Step 3 (can be skipped)

Train models:

* exp1
* exp2
* exp3b
* exp3c

or run ```train.sh```

## Inference
### Step 1 (can be skipped)
#### Step 1.1
Change the `data_root` path in ```datasets/init_dataloader.py:23```
#### Step 1.2
Run all prepared models from ```data/opts/``` using ```inference.py``` or ```inference.sh```
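
For example, the prepared option files could be iterated over from Python instead of `inference.sh`; the `--config` flag and the `*.opt` file pattern are assumptions borrowed from the attributes trainer's `main.py`, so check `inference.py` for its actual arguments.

```python
import glob
import subprocess

# Hypothetical driver: run inference once per saved option file.
for opt_file in sorted(glob.glob('data/opts/*.opt')):
    subprocess.run(['python', 'inference.py', '--config', opt_file], check=True)
```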

### Step 2
Ensemble all results:

```
python ensemble.py
```
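
The exact aggregation is implemented in `ensemble.py`; a minimal hedged sketch of one common scheme (averaging per-sample scores across model outputs, with file names and column names as placeholders) would be:

```python
import glob
import pandas as pd

# Hypothetical: each model writes a CSV with columns `sample_id,score`.
frames = [pd.read_csv(path) for path in sorted(glob.glob('results/*_scores.csv'))]
ensembled = pd.concat(frames).groupby('sample_id', as_index=False)['score'].mean()
ensembled.to_csv('final_submission.csv', index=False)
```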
12 changes: 12 additions & 0 deletions attributes_trainer/README.md
@@ -0,0 +1,12 @@
## Additional repository for training networks on face / gender recognition tasks for the ChaLearn Face Anti-spoofing Attack Detection Challenge

### Step 1
Download AFAD-Lite and unpack it into ```data/afad-lite```

### Step 2
Run training on 4 GPUs:
```CUDA_VISIBLE_DEVICES=0,1,2,3 python main.py --config data/opts/```

### Step 3
Use the trained model in the main repository for ```exp2```

1 change: 1 addition & 0 deletions attributes_trainer/datasets/__init__.py
@@ -0,0 +1 @@
from .init_dataloader import generate_loader
37 changes: 37 additions & 0 deletions attributes_trainer/datasets/init_dataloader.py
@@ -0,0 +1,37 @@
from .init_dataset import ImageListDataset
import torch.utils.data
import os
import pandas as pd

def generate_loader(opt, split, inference_list=None):

    if split == 'train':
        current_transform = opt.train_transform
        current_shuffle = True
        sampler = None
        drop_last = True

    else:
        current_transform = opt.test_transform
        current_shuffle = False
        sampler = None
        drop_last = False

    data_list = os.path.join(opt.data_list, split + '_list.txt')
    data_root = opt.data_root
    dataset = ImageListDataset(data_root=data_root, data_list=data_list, transform=current_transform)

    assert dataset
    if split == 'train' and opt.fake_class_weight != 1:
        weights = [opt.fake_class_weight if x != 1 else 1.0 for x in dataset.df.label.values]
        num_samples = len(dataset)
        replacement = True
        sampler = torch.utils.data.WeightedRandomSampler(weights, num_samples, replacement)
        current_shuffle = False
    if split == 'train' and len(dataset) % (opt.batch_size // opt.ngpu) < 32:
        drop_last = True

    dataset_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=current_shuffle,
                                                 num_workers=int(opt.nthreads), sampler=sampler, pin_memory=True,
                                                 drop_last=drop_last)
    return dataset_loader
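
For reference, a minimal way to call `generate_loader` from the `attributes_trainer` directory; the option values below are placeholders covering only the fields the function reads.

```python
import argparse
import torchvision as tv
from datasets import generate_loader

opt = argparse.Namespace(
    data_root='/path/to/images/',     # placeholder; prefixed to each list entry
    data_list='/path/to/lists/',      # must contain train_list.txt / val_list.txt etc.
    train_transform=tv.transforms.Compose([tv.transforms.ToTensor()]),
    test_transform=tv.transforms.Compose([tv.transforms.ToTensor()]),
    batch_size=256, ngpu=4, nthreads=8, fake_class_weight=1,
)
train_loader = generate_loader(opt, 'train')
```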
42 changes: 42 additions & 0 deletions attributes_trainer/datasets/init_dataset.py
@@ -0,0 +1,42 @@
import torch.utils.data as data
import torch
from PIL import Image
import pandas as pd
import numpy as np
import os


def rgb_loader(path):
    return Image.open(path)


class ImageListDataset(data.Dataset):
    """
    Builds a dataset based on a list of images.
    data_root - image path prefix
    data_list - annotation list location
    """
    def __init__(self, data_root, data_list, transform=None):
        self.data_root = data_root
        self.df = pd.read_csv(data_list)
        if 'label' not in self.df.columns:
            self.df['label'] = -1
        self.transform = transform
        self.loader = rgb_loader

    def __getitem__(self, index):
        path = self.data_root + self.df.path.iloc[index]
        img = self.loader(path)

        label = self.df.label.iloc[index]

        if self.transform is not None:
            img = self.transform(img)

        return img, label

    def __len__(self):
        return len(self.df)
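
The annotation list is read with `pd.read_csv`, so it is expected to be a comma-separated file with a `path` column and, for labeled data, a `label` column; `data_root` is concatenated to `path` as a plain string, so it should end with a separator. A small usage example (paths and list contents are placeholders):

```python
from torchvision import transforms
from datasets.init_dataset import ImageListDataset

# train_list.txt (assumed contents):
#   path,label
#   folder_a/img_0001.jpg,1
#   folder_b/img_0002.jpg,0
dataset = ImageListDataset(
    data_root='/path/to/images/',        # trailing slash matters: data_root + path
    data_list='/path/to/train_list.txt',
    transform=transforms.ToTensor(),
)
img, label = dataset[0]
```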
53 changes: 53 additions & 0 deletions attributes_trainer/main.py
@@ -0,0 +1,53 @@
import argparse,json,random,os
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torchvision as tv

from trainer import Model
from opts import get_opts
def main():

    # Load options
    parser = argparse.ArgumentParser(description='Attribute Learner')
    parser.add_argument('--config', type=str, help='Path to config .opt file. Leave blank if loading from opts.py')

    conf = parser.parse_args()
    opt = torch.load(conf.config) if conf.config else get_opts()

    print('===Options==')
    d = vars(opt)
    for k in d.keys():
        print(k, ':', d[k])

    # Fix seed
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)
    torch.cuda.manual_seed_all(opt.manual_seed)
    cudnn.benchmark = True

    # Create working directories
    try:
        os.makedirs(opt.out_path)
        os.makedirs(os.path.join(opt.out_path, 'checkpoints'))
        os.makedirs(os.path.join(opt.out_path, 'log_files'))
        print('Directory {} was successfully created.'.format(opt.out_path))

    except OSError:
        print('Directory {} already exists.'.format(opt.out_path))
        pass

    # Training
    M = Model(opt)
    M.train()
    '''
    TODO: M.test()
    '''


if __name__ == '__main__':
    main()


81 changes: 81 additions & 0 deletions attributes_trainer/opts.py
@@ -0,0 +1,81 @@
import argparse, os, json
import torch
import torchvision as tv
from utils import transforms

def get_opts():
    opt = argparse.Namespace()

    opt.task_name = 'umdfaces_cce_resnext50'
    opt.exp_name = 'umdfaces_exp3'
    opt.fold = 1
    opt.data_root = '/ssd/a.parkin/'
    opt.data_list = '/media3/a.parkin/media/FaceDatasets/lists/umdfaces_list/'

    opt.out_root = '/media3/a.parkin/media/FaceDatasets/opts/'
    opt.out_path = os.path.join(opt.out_root, opt.exp_name, 'fold{fold_n}'.format(fold_n=opt.fold))

    ### Dataloader options ###
    opt.nthreads = 32
    opt.batch_size = 256  # 280
    opt.ngpu = 4

    ### Learning ###
    opt.optimizer_name = 'SGD'
    opt.lr = 0.1
    opt.lr_decay_lvl = 0.1
    opt.lr_decay_period = 30
    opt.lr_type = 'step_lr'
    opt.num_epochs = 120
    opt.resume = ''
    opt.debug = 0

    ### Other ###
    opt.manual_seed = 42
    opt.log_batch_interval = 10
    opt.log_checkpoint = 1
    opt.net_type = 'ResNext50'
    opt.pretrained = None
    opt.loss_type = 'arc_margin'
    opt.nclasses = 8277
    opt.fake_class_weight = 1
    opt.visdom_port = 8097

    opt.git_commit_sha = '3ab79d6c8ec9b280f5fbdd7a8a363a6191fd65ce'

    opt.train_transform = tv.transforms.Compose([
        #transforms.MergeItems(True, p=0.2),
        #transforms.LabelSmoothing(eps=0.1, p=0.2),
        tv.transforms.RandomRotation(30, resample=2),
        #tv.transforms.Resize((125,125)),
        #transforms.CustomGaussianBlur(max_kernel_radius=3, p=0.2),
        tv.transforms.RandomResizedCrop(112, scale=(0.5, 1.0)),
        tv.transforms.RandomHorizontalFlip(p=0.5),
        tv.transforms.RandomApply([
            tv.transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)], p=0.2),
        tv.transforms.RandomGrayscale(p=0.2),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
    ])

    opt.test_transform = tv.transforms.Compose([
        #tv.transforms.Resize((125,125)),
        tv.transforms.RandomHorizontalFlip(p=0),
        tv.transforms.CenterCrop((112, 112)),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
    ])

    return opt


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Options')
    parser.add_argument('--savepath', type=str, default='/media3/a.parkin/media/FaceDatasets/opts/', help='Path to save options')
    conf = parser.parse_args()
    opts = get_opts()
    save_dir = os.path.join(conf.savepath, opts.exp_name)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filename = os.path.join(save_dir, opts.exp_name + '_' + 'fold{0}'.format(opts.fold) + '_' + opts.task_name + '.opt')
    torch.save(opts, filename)
    print('Options file was saved to ' + filename)
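
The saved `.opt` file is simply the pickled `argparse.Namespace`, so it round-trips through `torch.save`/`torch.load`, which is what `main.py --config` relies on; for example (run from the `attributes_trainer` directory):

```python
import argparse
import torch

from opts import get_opts

opts = get_opts()
torch.save(opts, 'example.opt')        # what the __main__ block above does per experiment
restored = torch.load('example.opt')   # what main.py does for --config
                                       # (newer PyTorch may need weights_only=False here)
assert isinstance(restored, argparse.Namespace)
print(restored.net_type, restored.nclasses)   # ResNext50 8277
```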