Commit 633bf0f (0 parents, i.e. a root commit)
Showing 26 changed files with 1,948 additions and 0 deletions.
.gitignore
@@ -0,0 +1,3 @@
__pycache__
data
data/*
README.md
@@ -0,0 +1,3 @@
# Implementation of Unsupervised Pixel Level Domain Adaptation with GAN #
> CS698U (Visual Recognition)
@@ -0,0 +1,173 @@
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn

import torchvision
import torchvision.transforms as transforms

import os
import argparse

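# models and utils are local modules in this repository: models defines the
# network architectures used below (VGG, ResNet, PreActResNet, ...), and
# utils provides the progress_bar console helper.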
from models import *
from utils import progress_bar
from torch.autograd import Variable
import pickle
import numpy as np
import sys

parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
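# Example invocation: python <this_script>.py --lr 0.01 --resume
# (--resume reloads the checkpoint saved at ./checkpoint/ckpt.t7 below)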

use_cuda = torch.cuda.is_available()
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
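# Both pipelines normalize with the standard per-channel CIFAR-10 mean/std;
# the training pipeline additionally applies random 32x32 crops (padding 4)
# and random horizontal flips for data augmentation.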
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Model
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.t7')
    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
else:
    print('==> Building model..')
    # net = VGG('VGG19')
    # net = ResNet18()
    net = PreActResNet18()
    # net = GoogLeNet()
    # net = DenseNet121()
    # net = ResNeXt29_2x64d()
    # net = MobileNet()
    # net = DPN92()
    # net = ShuffleNetG2()
    # net = SENet18()

if use_cuda:
    net.cuda()
    # net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
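    # benchmark mode lets cuDNN select the fastest convolution algorithms for
    # the fixed 32x32 CIFAR-10 input size.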
    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)

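# NetFeatures wraps the classifier minus its final linear layer so that
# penultimate-layer activations can be extracted as feature embeddings.
# nn.Sequential(children()[:-1]) keeps only the module layers, so the pooling
# and flatten that PreActResNet applies functionally in its forward pass are
# redone explicitly in forward() below.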
class NetFeatures(nn.Module):
    def __init__(self, original_model):
        super(NetFeatures, self).__init__()
        # print(list(original_model.children())[:-1])
        self.features = nn.Sequential(*list(original_model.children())[:-1])

    def forward(self, x):
        x = self.features(x)
        ## For PreActResnet only ##
        x = F.avg_pool2d(x, 4)
        x = x.view(x.size(0), -1)
        return x

net_features = NetFeatures(net)

# Training
def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))

def test(epoch, get_features=False):
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    embeddings = []
    for batch_idx, (inputs, targets) in enumerate(testloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        if get_features:
            embeddings.append(net_features(inputs).cpu().data.numpy())
            # print(net_features(inputs).cpu().data.numpy().shape)

        loss = criterion(outputs, targets)

        test_loss += loss.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))

    # Save checkpoint.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        print("Epoch:", epoch, "Accuracy:", acc)
        state = {
            'net': net, #.module if use_cuda else net,
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.t7')
        best_acc = acc
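    # Optionally stack the per-batch penultimate-layer features into a single
    # array (one row per test image) and pickle it to disk.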
    if get_features:
        embeddings = np.vstack(tuple(embeddings))
        print("Embeddings:", embeddings.shape)
        with open('cifar10_fc.pkl', 'wb') as f:
            pickle.dump(embeddings, f)


for epoch in range(start_epoch, start_epoch+100):
    train(epoch)
    test(epoch, get_features=True)
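A minimal sketch (not part of this commit) of how the embeddings pickled by test() could be read back, assuming cifar10_fc.pkl was written to the working directory:

import pickle

with open('cifar10_fc.pkl', 'rb') as f:
    embeddings = pickle.load(f)

# one row per CIFAR-10 test image, one column per penultimate-layer feature
print(embeddings.shape)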