Skip to content

Commit

Permalink
first update BiGAE code
Browse files Browse the repository at this point in the history
  • Loading branch information
qore-dl committed Apr 30, 2022
1 parent dafe046 commit 78e6d11
Show file tree
Hide file tree
Showing 205 changed files with 170,235 additions and 0 deletions.
1,349 changes: 1,349 additions & 0 deletions Bi-GAE-Code/Ada_celeba_classification.py

Large diffs are not rendered by default.

435 changes: 435 additions & 0 deletions Bi-GAE-Code/GBDT_celeba_classification.py

Large diffs are not rendered by default.

2,180 changes: 2,180 additions & 0 deletions Bi-GAE-Code/LDA_celeba_classification.py

Large diffs are not rendered by default.

100 changes: 100 additions & 0 deletions Bi-GAE-Code/PascalLoader.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 11:58:07 2017
@author: Biagio Brattoli
"""
import os, numpy as np
import torch
import torch.utils.data as data
# from scipy.misc import imread, imresize
import skimage.io as io
from scipy.sparse import csr_matrix
from PIL import Image
import xml.etree.ElementTree as ET

class DataLoader(data.Dataset):
    """Pascal VOC 2007 multi-label classification dataset.

    Reads image ids from ImageSets/Main/<trainval>.txt, loads the JPEG for
    each id, and builds a multi-hot label vector from the objects listed in
    the image's XML annotation file.
    """

    def __init__(self,data_path,trainval,transform,random_crops=0):
        # data_path: root of the VOC2007 folder (must contain JPEGImages/,
        #   Annotations/ and ImageSets/Main/).
        # trainval: split name, e.g. 'trainval' or 'test'; selects
        #   ImageSets/Main/<trainval>.txt.
        # transform: torchvision transform applied to the PIL image.
        # random_crops: 0 -> return one transformed image per sample;
        #   >0 -> return a stack of that many independently transformed
        #   crops (used for multi-crop evaluation).
        self.data_path = data_path
        self.transform = transform
        self.random_crops = random_crops
        self.trainval = trainval

        self.__init_classes()
        self.names, self.labels = self.__dataset_info()

    def __getitem__(self, index):
        # Load with skimage, then convert to PIL so torchvision transforms
        # apply. (scipy.misc.imread was the original loader; see the
        # commented import at the top of the file.)
        x = io.imread(self.data_path+'/JPEGImages/'+self.names[index]+'.jpg')
        # x = np.transpose(x, (2, 0, 1))
        x = Image.fromarray(x)
        # NOTE(review): the random-scale code below is dead -- the resize
        # call is commented out, so scale/w/h are computed but never used.
        # It still consumes one np.random.rand() draw per item, which
        # affects reproducibility of downstream random transforms.
        scale = np.random.rand()*2+0.25
        w = int(x.size[0]*scale)
        h = int(x.size[1]*scale)
        if min(w,h)<227:
            # Keep the shorter side at least 227 px (the crop size used
            # by the transforms in PascalTrain.py).
            scale = 227/min(w,h)
            w = int(x.size[0]*scale)
            h = int(x.size[1]*scale)
        # x = x.resize((w,h), Image.BILINEAR) # Random scale
        if self.random_crops==0:
            x = self.transform(x)
        else:
            # Multi-crop evaluation: stack several transformed crops so the
            # model's predictions can be averaged by the caller.
            crops = []
            for i in range(self.random_crops):
                crops.append(self.transform(x))
            x = torch.stack(crops)
        y = self.labels[index]
        return x, y

    def __len__(self):
        # Number of images in the selected split.
        return len(self.names)

    def __dataset_info(self):
        """Parse the split file and per-image XML annotations.

        Returns:
            (names, labels): array of 6-char image-id strings and a float32
            (N, num_classes) multi-hot label matrix.
        """
        #annotation_files = os.listdir(self.data_path+'/Annotations')
        with open(self.data_path+'/ImageSets/Main/'+self.trainval+'.txt') as f:
            annotations = f.readlines()

        # Strip the trailing newline from each line.
        annotations = [n[:-1] for n in annotations]

        names = []
        labels = []
        for af in annotations:
            # VOC2007 image ids are exactly 6 characters; skip anything else
            # (e.g. per-class "<id> <flag>" lines or blank lines).
            if len(af)!=6:
                continue
            filename = os.path.join(self.data_path,'Annotations',af)
            tree = ET.parse(filename+'.xml')
            objs = tree.findall('object')
            num_objs = len(objs)

            boxes = np.zeros((num_objs, 4), dtype=np.uint16)
            boxes_cl = np.zeros((num_objs), dtype=np.int32)

            for ix, obj in enumerate(objs):
                bbox = obj.find('bndbox')
                # Make pixel indexes 0-based
                x1 = float(bbox.find('xmin').text) - 1
                y1 = float(bbox.find('ymin').text) - 1
                x2 = float(bbox.find('xmax').text) - 1
                y2 = float(bbox.find('ymax').text) - 1

                cls = self.class_to_ind[obj.find('name').text.lower().strip()]
                boxes[ix, :] = [x1, y1, x2, y2]
                boxes_cl[ix] = cls

            # Multi-hot label: 1 for every class present in the image.
            # NOTE(review): `boxes` is filled but never used past this loop.
            lbl = np.zeros(self.num_classes)
            lbl[boxes_cl] = 1
            labels.append(lbl)
            names.append(af)

        return np.array(names), np.array(labels).astype(np.float32)

    def __init_classes(self):
        # The 20 Pascal VOC classes plus a background slot at index 0
        # (21 entries total, matching resnet50(classes=21) in PascalTrain).
        self.classes = ('__background__','aeroplane', 'bicycle', 'bird', 'boat',
                        'bottle', 'bus', 'car', 'cat', 'chair',
                        'cow', 'diningtable', 'dog', 'horse',
                        'motorbike', 'person', 'pottedplant',
                        'sheep', 'sofa', 'train', 'tvmonitor')
        self.num_classes = len(self.classes)
        self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))

20 changes: 20 additions & 0 deletions Bi-GAE-Code/PascalNetwork.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from torch import nn
from torchvision import models
import torch

def resnet50(classes, pretrain=True):
    """Build a torchvision ResNet-50 classifier head for `classes` outputs.

    Args:
        classes: number of output logits of the final linear layer.
        pretrain: when True, start from ImageNet-pretrained weights.

    Returns:
        The modified torchvision ResNet-50 module.
    """
    net = models.resnet50(pretrained=True) if pretrain else models.resnet50()
    # Swap the fixed 7x7 average pool for an adaptive one (tolerates other
    # input sizes) and replace the 1000-way ImageNet head.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    net.fc = nn.Linear(net.fc.in_features, classes)
    return net


185 changes: 185 additions & 0 deletions Bi-GAE-Code/PascalTrain.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,185 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os, sys, numpy as np
import matplotlib.pyplot as plt
import argparse

from sklearn.metrics import average_precision_score

from utils_pas import Logger

import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms

import multiprocessing
CORES = 4#int(float(multiprocessing.cpu_count())*0.25)

from PascalLoader import DataLoader
from PascalNetwork import resnet50

#sys.path.append('/export/home/bbrattol/git/JigsawPuzzlePytorch/Architecture')
#from alexnet import AlexNet as Network

from utils_pas import adjust_learning_rate


# Command-line configuration for Pascal VOC 2007 training.
parser = argparse.ArgumentParser(description='Train network on Pascal VOC 2007')
parser.add_argument('--pascal_path', type=str, default='VOC2007',help='Path to Pascal VOC 2007 folder')
parser.add_argument('--model', default=None, type=str, help='Pretrained model')

parser.add_argument('--gpu', default=1, type=int, help='gpu id')
# BUG FIX: help text previously read 'gpu id' (copy-paste error).
parser.add_argument('--epochs', default=160, type=int, help='number of training epochs')
parser.add_argument('--iter_start', default=0, type=int, help='Starting iteration count')
parser.add_argument('--batch', default=12, type=int, help='batch size')
parser.add_argument('--checkpoint', default='checkpoints1/', type=str, help='checkpoint folder')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate for SGD optimizer')
parser.add_argument('--crops', default=10, type=int, help='number of random crops during testing')
args = parser.parse_args()


def compute_mAP(labels,outputs):
    """Mean of the per-sample average-precision scores of a batch.

    Args:
        labels: tensor of multi-hot ground-truth rows, one per sample.
        outputs: tensor of predicted scores with the same shape.

    Returns:
        float: mean AP over all samples in the batch.
    """
    y_true = labels.cpu().numpy()
    y_pred = outputs.cpu().numpy()
    # Iterating a 2-D numpy array yields its rows, so zip pairs each
    # ground-truth row with its prediction row.
    per_sample = [average_precision_score(t, p) for t, p in zip(y_true, y_pred)]
    return np.mean(per_sample)

def main():
    """Train ResNet-50 for multi-label classification on Pascal VOC 2007.

    Builds train/val loaders, optionally restores pretrained weights, then
    runs SGD training with periodic logging, checkpointing every 5 epochs,
    and evaluation via test(). Reads all configuration from module-level
    `args`.
    """
    if args.gpu is not None:
        print('Using GPU %d'%args.gpu)
        # os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
        # os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
    else:
        print('CPU mode')

    # ImageNet channel statistics, required by the pretrained backbone.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std= [0.229, 0.224, 0.225])

    train_transform = transforms.Compose([transforms.RandomResizedCrop(227),transforms.RandomHorizontalFlip(),transforms.ToTensor(),normalize,])

    # NOTE(review): validation also uses random crops/flips; test() averages
    # predictions over `args.crops` crops per image to compensate.
    val_transform = transforms.Compose([
        #transforms.Scale(256),
        #transforms.CenterCrop(227),
        transforms.RandomResizedCrop(227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # DataLoader initialize
    train_data = DataLoader(args.pascal_path,'trainval',transform=train_transform)
    train_loader = torch.utils.data.DataLoader(dataset=train_data,batch_size=args.batch, shuffle=True,num_workers=CORES)

    val_data = DataLoader(args.pascal_path,'test',transform=val_transform,random_crops=args.crops)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=False,
                                             num_workers=CORES)

    N = len(train_data.names)
    iter_per_epoch = N//args.batch
    # Network initialize
    #net = Network(groups = 2)
    net = resnet50(classes = 21)  # 20 VOC classes + background slot
    if args.gpu is not None:
        net=net.cuda()

    if args.model is not None:
        # BUG FIX: was `net.load(args.model, args.fc)` -- torchvision models
        # have no .load() method and the parser defines no --fc option, so
        # this line always raised. Restore weights the standard PyTorch way.
        net.load_state_dict(torch.load(args.model))

    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                                lr=args.lr,momentum=0.9,weight_decay = 0.0001)

    # ROBUSTNESS: exist_ok avoids a crash when the checkpoint root already
    # exists but the train/test sub-folders do not (the old code skipped
    # both makedirs whenever the root existed).
    os.makedirs(args.checkpoint+'/train', exist_ok=True)
    os.makedirs(args.checkpoint+'/test', exist_ok=True)

    # logger_test = None
    logger_train = Logger(args.checkpoint+'/train')
    logger_test = Logger(args.checkpoint+'/test')

    ############## TRAINING ###############
    print('Start training: lr %f, batch size %d'%(args.lr,args.batch))
    print('Checkpoint: '+args.checkpoint)

    # Train the Model
    steps = args.iter_start
    # NOTE(review): the loop start `iter_per_epoch*args.iter_start` mixes
    # iteration and epoch units; kept as-is to preserve resume behavior.
    for epoch in range(iter_per_epoch*args.iter_start,args.epochs):
        adjust_learning_rate(optimizer, epoch, init_lr=args.lr, step=80, decay=0.1)

        mAP = []
        for i, (images, labels) in enumerate(train_loader):
            images = Variable(images)
            labels = Variable(labels)
            if args.gpu is not None:
                images = images.cuda()
                labels = labels.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = net(images)

            mAP.append(compute_mAP(labels.data,outputs.data))

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            loss = loss.cpu().data.numpy()

            if steps%100==0:
                print ('[%d/%d] %d), Loss: %.3f, mAP %.2f%%' %(epoch+1, args.epochs, steps, loss,100*np.mean(mAP[-20:])))

            if steps%20==0:
                # Log a running mAP over the most recent 20 batches.
                logger_train.scalar_summary('mAP', np.mean(mAP[-20:]), steps)
                logger_train.scalar_summary('loss', loss, steps)
                data = images.cpu().data.numpy().transpose([0,2,3,1])
                # logger_train.image_summary('input', data[:10], steps)

            steps += 1

        if epoch%5==0:
            filename = '%s/jps_%03i.pth' % (args.checkpoint, epoch)
            torch.save(net.state_dict(), filename)
            # net.save(args.checkpoint,epoch+1)

            print ('Saved: '+args.checkpoint)

        if epoch%5==0:
            test(net,criterion,logger_test,val_loader, steps)

        # Dropping a stop.txt into the checkpoint folder requests a clean
        # shutdown at the end of the epoch.
        if os.path.exists(args.checkpoint+'/stop.txt'):
            # break without using CTRL+C
            break

def test(net,criterion,logger,val_loader,steps):
    """Evaluate on the validation set, averaging scores over random crops.

    Args:
        net: model to evaluate (left back in train mode on return).
        criterion: unused; kept for call-site compatibility.
        logger: optional Logger for the mAP summary (may be None).
        val_loader: loader yielding (B, crops, 3, 227, 227) image stacks.
        steps: global step value used for logging/printing.
    """
    mAP = []
    net.eval()
    # IMPROVEMENT: run inference under no_grad -- outputs are identical,
    # but no autograd graph is built, cutting evaluation memory use.
    with torch.no_grad():
        for i, (images, labels) in enumerate(val_loader):
            # Fold the crop dimension into the batch for one forward pass.
            images = images.view((-1,3,227,227))
            images = Variable(images)
            if args.gpu is not None:
                images = images.cuda()

            # Forward only (comment previously said "Backward + Optimize",
            # which never happens here).
            outputs = net(images)
            outputs = outputs.cpu().data
            # Average the predictions of each image's crops.
            outputs = outputs.view((-1,args.crops,21))
            outputs = outputs.mean(dim=1).view((-1,21))

            #score = tnt.meter.mAPMeter(outputs, labels)
            mAP.append(compute_mAP(labels,outputs))

    if logger is not None:
        logger.scalar_summary('mAP', np.mean(mAP), steps)
    print ('TESTING: %d), mAP %.2f%%' %(steps,100*np.mean(mAP)))
    net.train()


# Script entry point: run training only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()

Loading

0 comments on commit 78e6d11

Please sign in to comment.