Add hogwild example
apaszke committed Jan 17, 2017
1 parent 32c7386 commit c94694b
Showing 3 changed files with 130 additions and 0 deletions.
62 changes: 62 additions & 0 deletions mnist_hogwild/main.py
@@ -0,0 +1,62 @@
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp

from train import train

# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--num-processes', type=int, default=2, metavar='N',
help='how many training processes to use (default: 2)')

class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)

def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)

if __name__ == '__main__':
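    # 'spawn' starts each worker in a fresh interpreter; torch.multiprocessing
    # then hands shared-memory tensors to the children through pickling.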
mp.set_start_method('spawn')
args = parser.parse_args()

torch.manual_seed(args.seed)

model = Net()
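    # Move the parameters into shared memory so every worker updates the same
    # copy (Hogwild); train.py clones the gradient buffers so that only the
    # parameters remain shared.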
model.share_memory()

processes = []
for rank in range(args.num_processes):
p = mp.Process(target=train, args=(rank, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
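
The Hogwild pattern above rests on one primitive: a tensor whose storage lives in shared memory can be updated in place by any process, and every other process sees the change immediately. Below is a minimal sketch (not part of this commit, assuming a current PyTorch install) of that behavior in isolation.

import torch
import torch.multiprocessing as mp

def _worker(t):
    # The in-place add writes directly into the shared storage,
    # so the parent observes the result without any explicit IPC.
    t.add_(1)

if __name__ == '__main__':
    mp.set_start_method('spawn')
    t = torch.zeros(3)
    t.share_memory_()  # move the underlying storage into shared memory
    p = mp.Process(target=_worker, args=(t,))
    p.start()
    p.join()
    print(t)  # a tensor of ones: the child's update is visible here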
2 changes: 2 additions & 0 deletions mnist_hogwild/requirements.txt
@@ -0,0 +1,2 @@
torch
torchvision
66 changes: 66 additions & 0 deletions mnist_hogwild/train.py
@@ -0,0 +1,66 @@
import os
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms

def train(rank, args, model):
torch.manual_seed(args.seed + rank)
    for param in model.parameters():
        # Break gradient sharing: clone the gradient buffers so each process
        # accumulates its own gradients while the parameters stay shared.
        param.grad.data = param.grad.data.clone()

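    # Every worker constructs its own DataLoader; the per-rank seed set above
    # gives each process a different shuffle order over the training set.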
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, num_workers=1)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
        batch_size=args.test_batch_size, shuffle=True, num_workers=1)

optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train_epoch(epoch, args, model, train_loader, optimizer)
test_epoch(epoch, args, model, test_loader)


def train_epoch(epoch, args, model, data_loader, optimizer):
model.train()
pid = os.getpid()
for batch_idx, (data, target) in enumerate(data_loader):
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('{}\tTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
pid, epoch, batch_idx * len(data), len(data_loader.dataset),
100. * batch_idx / len(data_loader), loss.data[0]))


def test_epoch(epoch, args, model, data_loader):
model.eval()
test_loss = 0
correct = 0
for data, target in data_loader:
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()

    test_loss /= len(data_loader)  # loss function already averages over batch size
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))

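With the three files in place, the example can be launched from the mnist_hogwild directory using the flags defined by the parser in main.py, for example:

python main.py --num-processes 4 --epochs 10

Each worker prints training progress tagged with its own PID, and all of them step the shared parameters through their own SGD optimizer without any locking; this lock-free update scheme is known as Hogwild (Recht et al., 2011).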