Skip to content

Commit

Permalink
Fixed some bugs and typos
Browse files Browse the repository at this point in the history
  • Loading branch information
Physicist91 committed Jul 29, 2019
1 parent a0316b2 commit 4313719
Show file tree
Hide file tree
Showing 6 changed files with 54 additions and 46 deletions.
2 changes: 1 addition & 1 deletion drawrect.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from dv.init import *
from dv.MyImageFolderWithPaths import CarsDataset
from dv.DFL import DFL_ResNet50
from dv.util import *
from train import *
from validate import *
import sys
Expand All @@ -21,7 +22,6 @@
import os
import re
import numpy as np
from utils.util import *

parser = argparse.ArgumentParser(description='Discriminative Filter Learning within a CNN')
parser.add_argument('--dataroot', default = './dataset', metavar='DIR',
Expand Down
32 changes: 14 additions & 18 deletions mDFL-CNN/utils/save.py → dv/save.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@
import os
import shutil
import datetime
from utils.util import *
from dv.util import *

class Log(object):
def save_train_info(self, epoch, batch, maxbatch, losses, top1, top5):
"""
Expand All @@ -13,10 +14,10 @@ def save_train_info(self, epoch, batch, maxbatch, losses, top1, top5):
loss2 = losses[2]
loss3 = losses[3]
root_dir = os.path.abspath('./')
log_dir = os.path.join(root_dir, 'log')
log_dir = os.path.join(root_dir, 'log')
if not os.path.exists(log_dir):
os.mkdir(log_dir)

log_file = os.path.join(log_dir, 'log_train.txt')
if not os.path.exists(log_file):
os.mknod(log_file)
Expand All @@ -30,11 +31,11 @@ def save_train_info(self, epoch, batch, maxbatch, losses, top1, top5):
'Prec@1 ({top1.avg:.3f})\t'
'Prec@5 ({top5.avg:.3f})\n'.format(epoch, batch, maxbatch,loss = loss,loss1 = loss1,loss2 = loss2, loss3=loss3, top1=top1, top5=top5))


def save_test_info(self, epoch, top1, top5):
root_dir = os.path.abspath('./')
log_dir = os.path.join(root_dir, 'log')
# check log_dir
log_dir = os.path.join(root_dir, 'log')
# check log_dir
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_file = os.path.join(log_dir, 'log_test.txt')
Expand All @@ -48,33 +49,28 @@ def save_test_info(self, epoch, top1, top5):
# this is for weight
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""[summary]
[description]
Arguments:
state {[type]} -- [description] a dict describe some params
is_best {bool} -- [description] a bool value
Keyword Arguments:
filename {str} -- [description] (default: {'checkpoint.pth.tar'})
"""
root_dir = get_root_dir()
weight_dir = os.path.join(root_dir, 'weight')
if not os.path.exists(weight_dir):
os.mkdir(weight_dir)

epoch = state['epoch']
prec1 = state['prec1']

file_path = os.path.join(weight_dir, 'epoch_{:04d}_top1_{:02d}_{}'.format(int(epoch), int(prec1), filename))
file_path = os.path.join(weight_dir, 'epoch_{:04d}_top1_{:02d}_{}'.format(int(epoch), int(prec1), filename))
torch.save(state, file_path)

best_path = os.path.join(weight_dir, 'model_best.pth.tar')

if is_best:
shutil.copyfile(file_path, best_path)





10 changes: 5 additions & 5 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,10 @@
from dv.init import *
from dv.MyImageFolderWithPaths import CarsDataset, CUB_2011
from dv.transform import *
from utils.util import *
from dv.util import *
from train import *
from validate import *
from drawrect import *
import sys
import argparse
import os
Expand All @@ -18,7 +19,6 @@
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from drawrect import *

parser = argparse.ArgumentParser(description='Discriminative Filter Learning within a CNN')
parser.add_argument('--dataroot', metavar='DIR',
Expand Down Expand Up @@ -174,15 +174,15 @@ def main():
train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last = False)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.testbatch, shuffle=True,
test_dataset, batch_size=args.test_batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last = False)
test_loader_simple = torch.utils.data.DataLoader(
test_dataset_simple, batch_size=args.testbatch, shuffle=True,
test_dataset_simple, batch_size=args.test_batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, drop_last = False)
print('Deep Vision <==> Part3 : loading dataset <==> Done')


print('DFL-CNN <==> Part4 : model training <==> Begin')
print('Deep Vision <==> Part4 : model training <==> Begin')

if args.gpu is not None:
torch.cuda.empty_cache()
Expand Down
15 changes: 15 additions & 0 deletions run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Launch script for DFL-CNN / Deep Vision training via main.py.
# Pins the process to physical GPU 0 (CUDA_VISIBLE_DEVICES=0); --gpu 1 is passed
# to main.py as well — presumably the number of GPUs to use; verify in main.py's
# argparse definitions (NOTE(review): confirm).
# --dataroot is a machine-specific absolute path — adjust per host.
# --nclass 196 together with --dataset 'cars' suggests the Stanford Cars dataset
# (196 classes) — TODO confirm against the CarsDataset loader.
CUDA_VISIBLE_DEVICES=0 python main.py \
--gpu 1 \
--dataroot '/export/home/dv/dv029/DFL-CNN/dataset' \
--result './vis_result' \
--lr 0.005 \
--epochs 40 \
--workers 4 \
--train_batchsize_per_gpu 8 \
--init_type 'xavier' \
--print-freq 1 \
--vis_epoch 1 \
--eval_epoch 1 \
--num_filters 4 \
--nclass 196 \
--dataset 'cars'
23 changes: 10 additions & 13 deletions train.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
from dv.util import *
from dv.save import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import os
import sys
import time
from utils.util import *
from utils.save import *
from torchvision import datasets, transforms, utils
import torchvision.models as models
import numpy as np
Expand All @@ -18,17 +18,17 @@ def train(args, train_loader, model, criterion, optimizer, epoch):
top1 = AverageMeter()
top5 = AverageMeter()
log = Log()

losses1 = AverageMeter()
losses2 = AverageMeter()
losses3 = AverageMeter()
# switch to train mode
model.train()

for i, (data, target) in enumerate(train_loader):

target = target.type(torch.LongTensor)

if args.gpu is not None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data = data.to(device)
Expand All @@ -40,9 +40,9 @@ def train(args, train_loader, model, criterion, optimizer, epoch):
loss1 = criterion(out1, target)
loss2 = criterion(out2, target)
loss3 = criterion(out3, target)

loss = loss1 + loss2 + 0.1 * loss3

# measure accuracy and record loss
prec1, prec5 = accuracy(out, target, topk=(1, 5)) # this is metric on trainset
batchsize = data.size(0)
Expand All @@ -56,12 +56,12 @@ def train(args, train_loader, model, criterion, optimizer, epoch):
losses3.update(loss3.item(), batchsize)
top1.update(prec1[0], batchsize)
top5.update(prec5[0], batchsize)

# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()

if i % args.print_freq == 0:
print('DFL-CNN <==> Train Epoch: [{0}][{1}/{2}]\n'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
Expand All @@ -71,9 +71,6 @@ def train(args, train_loader, model, criterion, optimizer, epoch):
'Top1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Top5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), loss=losses, loss1=losses1, loss2=losses2, loss3=losses3, top1=top1, top5=top5))

totalloss = [losses, losses1, losses2, losses3]
log.save_train_info(epoch, i, len(train_loader), totalloss, top1, top5)



18 changes: 9 additions & 9 deletions validate.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
import torch
import time
import sys
from utils.util import *
from utils.save import *
from dv.util import *
from dv.save import *
from tqdm import tqdm

def validate_dv(args, val_loader, model, criterion, epoch):
print('Deep Vision module: validating on the test set...')

batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
log = Log()
model.eval()
model.eval()
end = time.time()

total_output= []
Expand All @@ -40,7 +40,7 @@ def validate_dv(args, val_loader, model, criterion, epoch):
else:
total_output = torch.cat((total_output, output.data.float()) , 0)
total_label = torch.cat((total_label , target.data.float()) , 0)

_,predict = torch.max(total_output,1)

acc = torch.sum(torch.squeeze(predict).float() == total_label).item() / float(total_label.size()[0])
Expand All @@ -54,7 +54,7 @@ def validate(args, val_loader, model, criterion, epoch):
top1 = AverageMeter()
top5 = AverageMeter()
log = Log()
model.eval()
model.eval()
end = time.time()

# we may have ten d in data
Expand Down Expand Up @@ -88,13 +88,13 @@ def validate_simple(args, val_loader, model, criterion, epoch):
top1 = AverageMeter()
top5 = AverageMeter()
log = Log()
model.eval()
model.eval()
end = time.time()

# we may have ten d in data
for i, (data, target) in enumerate(val_loader):
target = target.type(torch.LongTensor)

if args.gpu is not None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data = data.to(device)
Expand All @@ -116,4 +116,4 @@ def validate_simple(args, val_loader, model, criterion, epoch):

print('DFL-CNN <==> Test Total <==> Top1 {:.3f}% Top5 {:.3f}%'.format(top1.avg, top5.avg))
log.save_test_info(epoch, top1, top5)
return top1.avg
return top1.avg

0 comments on commit 4313719

Please sign in to comment.