{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "authorship_tag": "ABX9TyM3wq9CHLjekkIXIgXRxueE",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/udlbook/udlbook/blob/main/Notebooks/Chap09/9_5_Augmentation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
"# **Notebook 9.5: Augmentation**\n", | ||
"\n", | ||
"This notebook investigates data augmentation for the MNIST-1D model.\n", | ||
"\n", | ||
"Work through the cells below, running each cell in turn. In various places you will see the words \"TO DO\". Follow the instructions at these places and make predictions about what is going to happen or write code to complete the functions.\n", | ||
"\n", | ||
"Contact me at [email protected] if you find any mistakes or have any suggestions.\n" | ||
], | ||
"metadata": { | ||
"id": "el8l05WQEO46" | ||
} | ||
}, | ||
    {
      "cell_type": "code",
      "source": [
        "# Run this if you're in Colab to make a local copy of the MNIST-1D repository\n",
        "!git clone https://github.com/greydanus/mnist1d"
      ],
      "metadata": {
        "id": "syvgxgRr3myY"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import torch, torch.nn as nn\n",
        "from torch.utils.data import TensorDataset, DataLoader\n",
        "from torch.optim.lr_scheduler import StepLR\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "import mnist1d\n",
        "import random"
      ],
      "metadata": {
        "id": "ckrNsYd13pMe"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "args = mnist1d.data.get_dataset_args()\n",
        "data = mnist1d.data.get_dataset(args, path='./mnist1d_data.pkl', download=False, regenerate=False)\n",
        "\n",
        "# The training and test inputs and outputs are in\n",
        "# data['x'], data['y'], data['x_test'], and data['y_test']\n",
        "print(\"Examples in training set: {}\".format(len(data['y'])))\n",
        "print(\"Examples in test set: {}\".format(len(data['y_test'])))\n",
        "print(\"Length of each example: {}\".format(data['x'].shape[-1]))"
      ],
      "metadata": {
        "id": "D_Woo9U730lZ"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "D_i = 40 # Input dimensions\n",
        "D_k = 200 # Hidden dimensions\n",
        "D_o = 10 # Output dimensions\n",
        "\n",
        "# Define a model with two hidden layers of size 200\n",
        "# and ReLU activations between them\n",
        "model = nn.Sequential(\n",
        "    nn.Linear(D_i, D_k),\n",
        "    nn.ReLU(),\n",
        "    nn.Linear(D_k, D_k),\n",
        "    nn.ReLU(),\n",
        "    nn.Linear(D_k, D_o))\n",
        "\n",
        "def weights_init(layer_in):\n",
        "  # Initialize the parameters with He initialization\n",
        "  if isinstance(layer_in, nn.Linear):\n",
        "    nn.init.kaiming_uniform_(layer_in.weight)\n",
        "    layer_in.bias.data.fill_(0.0)\n",
        "\n",
        "# Call the function you just defined\n",
        "model.apply(weights_init)"
      ],
      "metadata": {
        "id": "JfIFWFIL33eF"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# choose cross entropy loss function (equation 5.24)\n",
        "loss_function = torch.nn.CrossEntropyLoss()\n",
        "# construct SGD optimizer and initialize learning rate and momentum\n",
        "optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
        "# object that decreases learning rate by half every 10 epochs\n",
        "scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
        "# convert the training and test data to torch tensors\n",
        "x_train = torch.tensor(data['x'].astype('float32'))\n",
        "y_train = torch.tensor(data['y'].transpose().astype('long'))\n",
        "x_test = torch.tensor(data['x_test'].astype('float32'))\n",
        "y_test = torch.tensor(data['y_test'].astype('long'))\n",
        "\n",
        "# load the data into a class that creates the batches\n",
        "data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
        "\n",
        "# Initialize model weights\n",
        "model.apply(weights_init)\n",
        "\n",
        "# loop over the dataset n_epoch times\n",
        "n_epoch = 50\n",
        "# store the loss and the % correct at each epoch\n",
        "errors_train = np.zeros((n_epoch))\n",
        "errors_test = np.zeros((n_epoch))\n",
        "\n",
        "for epoch in range(n_epoch):\n",
        "  # loop over batches\n",
        "  for i, batch in enumerate(data_loader):\n",
        "    # retrieve inputs and labels for this batch\n",
        "    x_batch, y_batch = batch\n",
        "    # zero the parameter gradients\n",
        "    optimizer.zero_grad()\n",
        "    # forward pass -- calculate model output\n",
        "    pred = model(x_batch)\n",
        "    # compute the loss\n",
        "    loss = loss_function(pred, y_batch)\n",
        "    # backward pass\n",
        "    loss.backward()\n",
        "    # SGD update\n",
        "    optimizer.step()\n",
        "\n",
        "  # step the learning-rate schedule once per epoch (otherwise the scheduler above does nothing)\n",
        "  scheduler.step()\n",
        "\n",
        "  # Run whole dataset to get statistics -- normally wouldn't do this\n",
        "  pred_train = model(x_train)\n",
        "  pred_test = model(x_test)\n",
        "  _, predicted_train_class = torch.max(pred_train.data, 1)\n",
        "  _, predicted_test_class = torch.max(pred_test.data, 1)\n",
        "  errors_train[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
        "  errors_test[epoch] = 100 - 100 * (predicted_test_class == y_test).float().sum() / len(y_test)\n",
        "  print(f'Epoch {epoch:5d}, train error {errors_train[epoch]:3.2f}, test error {errors_test[epoch]:3.2f}')"
      ],
      "metadata": {
        "id": "YFfVbTPE4BkJ"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Plot the results\n",
        "fig, ax = plt.subplots()\n",
        "ax.plot(errors_train,'r-',label='train')\n",
        "ax.plot(errors_test,'b-',label='test')\n",
        "ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
        "ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
        "ax.set_title('Train Error %3.2f, Test Error %3.2f'%(errors_train[-1],errors_test[-1]))\n",
        "ax.legend()\n",
        "plt.show()"
      ],
      "metadata": {
        "id": "FmGDd4vB8LyM"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "The best test performance is about 33% error. Let's see if we can improve on that by augmenting the data."
      ],
      "metadata": {
        "id": "55XvoPDO8Qp-"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "def augment(data_in):\n",
        "  # Create output vector\n",
        "  data_out = np.zeros_like(data_in)\n",
        "\n",
        "  # TO DO: Shift the input data by a random offset\n",
        "  # (rotating, so that points that would fall off the end are added back at the beginning)\n",
        "  # Replace this line:\n",
        "  data_out = np.zeros_like(data_in)\n",
        "\n",
        "  # TO DO: Randomly scale the data by a factor drawn from a uniform distribution over [0.8, 1.2]\n",
        "  # Replace this line:\n",
        "  data_out = np.array(data_out)\n",
        "\n",
        "  return data_out"
      ],
      "metadata": {
        "id": "IP6z2iox8MOF"
      },
      "execution_count": null,
      "outputs": []
    },
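    {
      "cell_type": "markdown",
      "source": [
        "If you get stuck, the cell below sketches one possible solution. It is only a sketch, not the definitive answer: the `np.roll` circular shift and the single global scale factor are choices we have made, and the helper name `augment_example` is ours. Try the exercise yourself first, then compare."
      ],
      "metadata": {}
    },
    {
      "cell_type": "code",
      "source": [
        "# One possible reference implementation (a sketch, not the only valid answer)\n",
        "def augment_example(data_in):\n",
        "  # Circular shift by a random offset; np.roll wraps points that fall\n",
        "  # off the end back around to the beginning\n",
        "  offset = random.randint(0, len(data_in)-1)\n",
        "  data_out = np.roll(data_in, offset)\n",
        "  # Randomly scale by a factor drawn uniformly from [0.8, 1.2]\n",
        "  data_out = data_out * np.random.uniform(0.8, 1.2)\n",
        "  return data_out"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },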
    {
      "cell_type": "code",
      "source": [
        "n_data_orig = data['x'].shape[0]\n",
        "# We'll double the amount of data\n",
        "n_data_augment = n_data_orig+4000\n",
        "augmented_x = np.zeros((n_data_augment, D_i))\n",
        "augmented_y = np.zeros(n_data_augment)\n",
        "# First n_data_orig rows are original data\n",
        "augmented_x[0:n_data_orig,:] = data['x']\n",
        "augmented_y[0:n_data_orig] = data['y']\n",
        "\n",
        "# Fill in the rest with augmented data\n",
        "for c_augment in range(n_data_orig, n_data_augment):\n",
        "  # Choose a data point randomly\n",
        "  random_data_index = random.randint(0, n_data_orig-1)\n",
        "  # Augment the point and store it\n",
        "  augmented_x[c_augment,:] = augment(data['x'][random_data_index,:])\n",
        "  augmented_y[c_augment] = data['y'][random_data_index]\n"
      ],
      "metadata": {
        "id": "bzN0lu5J95AJ"
      },
      "execution_count": null,
      "outputs": []
    },
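    {
      "cell_type": "markdown",
      "source": [
        "Before retraining, a quick sanity check is worthwhile (our suggestion, not part of the original exercise): plot one original signal next to an augmented copy and confirm that your `augment` shifts the signal with wrap-around and slightly rescales its amplitude."
      ],
      "metadata": {}
    },
    {
      "cell_type": "code",
      "source": [
        "# Sanity check: compare an original example to one augmented version of it\n",
        "fig, ax = plt.subplots()\n",
        "ax.plot(data['x'][0,:], 'k-', label='original')\n",
        "ax.plot(augment(data['x'][0,:]), 'g-', label='augmented')\n",
        "ax.set_xlabel('Position'); ax.set_ylabel('Signal')\n",
        "ax.legend()\n",
        "plt.show()"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },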
    {
      "cell_type": "code",
      "source": [
        "# choose cross entropy loss function (equation 5.24)\n",
        "loss_function = torch.nn.CrossEntropyLoss()\n",
        "# construct SGD optimizer and initialize learning rate and momentum\n",
        "optimizer = torch.optim.SGD(model.parameters(), lr = 0.05, momentum=0.9)\n",
        "# object that decreases learning rate by half every 10 epochs\n",
        "scheduler = StepLR(optimizer, step_size=10, gamma=0.5)\n",
        "# convert the augmented training data and the test data to torch tensors\n",
        "x_train = torch.tensor(augmented_x.astype('float32'))\n",
        "y_train = torch.tensor(augmented_y.transpose().astype('long'))\n",
        "x_test = torch.tensor(data['x_test'].astype('float32'))\n",
        "y_test = torch.tensor(data['y_test'].astype('long'))\n",
        "\n",
        "# load the data into a class that creates the batches\n",
        "data_loader = DataLoader(TensorDataset(x_train,y_train), batch_size=100, shuffle=True, worker_init_fn=np.random.seed(1))\n",
        "\n",
        "# Initialize model weights\n",
        "model.apply(weights_init)\n",
        "\n",
        "# loop over the dataset n_epoch times\n",
        "n_epoch = 50\n",
        "# store the loss and the % correct at each epoch\n",
        "errors_train_aug = np.zeros((n_epoch))\n",
        "errors_test_aug = np.zeros((n_epoch))\n",
        "\n",
        "for epoch in range(n_epoch):\n",
        "  # loop over batches\n",
        "  for i, batch in enumerate(data_loader):\n",
        "    # retrieve inputs and labels for this batch\n",
        "    x_batch, y_batch = batch\n",
        "    # zero the parameter gradients\n",
        "    optimizer.zero_grad()\n",
        "    # forward pass -- calculate model output\n",
        "    pred = model(x_batch)\n",
        "    # compute the loss\n",
        "    loss = loss_function(pred, y_batch)\n",
        "    # backward pass\n",
        "    loss.backward()\n",
        "    # SGD update\n",
        "    optimizer.step()\n",
        "\n",
        "  # step the learning-rate schedule once per epoch (otherwise the scheduler above does nothing)\n",
        "  scheduler.step()\n",
        "\n",
        "  # Run whole dataset to get statistics -- normally wouldn't do this\n",
        "  pred_train = model(x_train)\n",
        "  pred_test = model(x_test)\n",
        "  _, predicted_train_class = torch.max(pred_train.data, 1)\n",
        "  _, predicted_test_class = torch.max(pred_test.data, 1)\n",
        "  errors_train_aug[epoch] = 100 - 100 * (predicted_train_class == y_train).float().sum() / len(y_train)\n",
        "  errors_test_aug[epoch] = 100 - 100 * (predicted_test_class == y_test).float().sum() / len(y_test)\n",
        "  print(f'Epoch {epoch:5d}, train error {errors_train_aug[epoch]:3.2f}, test error {errors_test_aug[epoch]:3.2f}')"
      ],
      "metadata": {
        "id": "hZUNrXpS_kRs"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Plot the results -- train/test error for the augmented run,\n",
        "# plus the original test error for comparison\n",
        "fig, ax = plt.subplots()\n",
        "ax.plot(errors_train_aug,'r-',label='train (augmented)')\n",
        "ax.plot(errors_test,'b-',label='test (original)')\n",
        "ax.plot(errors_test_aug,'g-',label='test (augmented)')\n",
        "ax.set_ylim(0,100); ax.set_xlim(0,n_epoch)\n",
        "ax.set_xlabel('Epoch'); ax.set_ylabel('Error')\n",
        "ax.set_title('Train Error %3.2f, Test Error %3.2f'%(errors_train_aug[-1],errors_test_aug[-1]))\n",
        "ax.legend()\n",
        "plt.show()"
      ],
      "metadata": {
        "id": "IcnAW4ixBnuc"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "Hopefully, you should see an improvement in performance when we augment the data."
      ],
      "metadata": {
        "id": "jgsR7ScJHc9b"
      }
    }
  ]
}