Skip to content

Commit

Permalink
training pipeline
Browse files Browse the repository at this point in the history
  • Loading branch information
gholmes829 committed Apr 30, 2021
1 parent 0406eb2 commit 683126a
Show file tree
Hide file tree
Showing 3 changed files with 137 additions and 20 deletions.
4 changes: 2 additions & 2 deletions car/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,11 +73,11 @@ def get_collapsed_data(self, episodes):
return np.array(collapsed_videos), np.array(collapsed_labels)

def preprocess(self, img):
	"""Resize the frame, convert BGR to grayscale, and scale pixels to [0, 1]."""
	# self.disp_img(img)  # uncomment to inspect the raw input frame
	resized = cv2.resize(img, settings.img_size_rev)
	gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
	normalized = gray / 255
	# self.disp_img(normalized)  # uncomment to inspect the processed frame
	return normalized

def disp_img(self, img):
Expand Down
8 changes: 6 additions & 2 deletions car/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,12 @@
"""

# Camera frame dimensions in pixels (reduced from the previous 240 x 320).
img_width = 180
img_height = 240

# (width, height) ordering and its reverse (height, width); some consumers
# (e.g. car/data.py's preprocess) use the reversed form — confirm which
# ordering each caller expects before changing these.
img_size = (img_width, img_height)
img_size_rev = (img_height, img_width)
145 changes: 129 additions & 16 deletions car/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,35 +3,74 @@
"""

import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import larq as lq
import tensorflow as tf
from time import time
import numpy as np

from matplotlib import pyplot as plt
plt.style.use("dark_background")

from data import Data
import settings

def plotCosts(loss, accuracy, name):
	"""Plot per-epoch loss and accuracy curves for a training run.

	Args:
		loss: sequence of per-epoch loss values.
		accuracy: sequence of per-epoch accuracy-like metric values
			(here this is actually the RMSE history — see train_and_evaluate).
		name: model name shown in the figure title.

	Builds the figure only; the caller is responsible for plt.show().
	"""
	figs, axes = plt.subplots(2, sharex=True)

	# Fixed typo: was "Training Evalutation: "
	plt.suptitle("Training Evaluation: " + name)

	# Draw both a line and point markers so individual epochs stay visible.
	axes[0].plot(accuracy, c="cyan")
	axes[0].plot(accuracy, "o", c="cyan")
	axes[0].set_ylabel("Percent Correct")
	axes[0].set_title("Accuracy")

	axes[1].plot(loss, c="red")
	axes[1].plot(loss, "o", c="red")
	axes[1].set_ylabel("RMSE")
	axes[1].set_title("Loss")

	plt.xlabel("Epoch")

	axes[0].grid(alpha=0.25, ls="--")
	axes[1].grid(alpha=0.25, ls="--")
def train_and_evaluate(model, trainingData, trainingLabels, testingData, testingLabels, name="model", epochs=2, batch_size=64, verbose=True):
	"""Train a compiled Keras model and print before/after evaluation summaries.

	Args:
		model: compiled Keras model exposing fit() and evaluate().
		trainingData, trainingLabels: training set arrays.
		testingData, testingLabels: held-out test set arrays.
		name: label used in the console banner.
		epochs, batch_size, verbose: passed through to model.fit().

	Returns:
		(loss, accuracy): per-epoch history lists; note "accuracy" is the
		root_mean_squared_error metric history, not a classification accuracy.
	"""
	print("-" * 100)
	# Fixed banner: was "Evaluating and Evaluating"
	print("Training and Evaluating " + name + ":")
	print()
	print("Initial testing...")
	# Baseline performance on the test set before any training.
	initialLoss, initialAccuracy = model.evaluate(testingData, testingLabels, verbose=0)

	# training
	print("Training...")
	timer = time()
	history = model.fit(trainingData, trainingLabels, batch_size=batch_size, epochs=epochs, verbose=verbose)
	elapsed = time() - timer

	print("Testing...")
	trainingLoss, trainingAccuracy = model.evaluate(trainingData, trainingLabels, verbose=0)
	testLoss, testAccuracy = model.evaluate(testingData, testingLabels, verbose=verbose)

	print("\nTraining time: " + str(round(elapsed, 3)) + " secs")

	# Fixed typo: was "Iniital accuracy"
	print("\nInitial accuracy: " + str(round(100 * initialAccuracy, 3)))
	print("Training accuracy: " + str(round(100 * trainingAccuracy, 3)))
	print("Test accuracy: " + str(round(100 * testAccuracy, 3)))

	loss = history.history["loss"]
	# NOTE(review): this returns the RMSE metric history under the name
	# "accuracy"; the plotting labels downstream reflect that mismatch.
	accuracy = history.history["root_mean_squared_error"]
	print("-" * 100)
	return loss, accuracy

def main():
print("Loading data...")
Expand All @@ -44,12 +83,86 @@ def main():

training_data, training_labels = data.get_collapsed_data(episodes[:7])
testing_data, testing_labels = data.get_collapsed_data(episodes[7:])
m = features.shape[0]

num_training = training_data.shape[0]
num_testing = testing_data.shape[0]

training_data = training_data.reshape((num_training, *settings.img_size, 1))
testing_data = testing_data.reshape((num_testing, *settings.img_size, 1))

print("Data loaded!")

cwd = os.getcwd()
model_path = os.path.join(cwd, "models")

# DEFINING ARCHITECTURE

kwargs = {
"input_quantizer": "ste_sign",
"kernel_quantizer": "ste_sign",
"kernel_constraint": "weight_clip"
}

model = tf.keras.models.Sequential()

model.add(lq.layers.QuantConv2D(24, (5, 5),
kernel_quantizer="ste_sign",
kernel_constraint="weight_clip",
use_bias=False,
input_shape=(*settings.img_size, 1)))

model.add(tf.keras.layers.MaxPooling2D((4, 4)))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantConv2D(36, (5, 5), use_bias=False, **kwargs))
model.add(tf.keras.layers.MaxPooling2D((4, 4)))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantConv2D(48, (5, 5), use_bias=False, **kwargs))
model.add(tf.keras.layers.MaxPooling2D((4, 4)))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantConv2D(64, (3, 3), use_bias=False, **kwargs))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Flatten())

model.add(lq.layers.QuantConv2D(64, (3, 3), use_bias=False, **kwargs))
model.add(tf.keras.layers.BatchNormalization(scale=False))
model.add(tf.keras.layers.Flatten())

model.add(lq.layers.QuantDense(1152, use_bias=False, **kwargs))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantDense(100, use_bias=False, **kwargs))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantDense(50, use_bias=False, **kwargs))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantDense(10, use_bias=False, **kwargs))
model.add(tf.keras.layers.BatchNormalization(scale=False))

model.add(lq.layers.QuantDense(1, use_bias=False, **kwargs))
#model.add(tf.keras.layers.BatchNormalization(scale=False))
#model.add(tf.keras.layers.Dense(1))

# COMPILING
model.compile(optimizer='adam',
loss='mse',
metrics=[tf.keras.metrics.RootMeanSquaredError()])

# EVALUATING
bin_cnn_loss, bin_cnn_accuracy = train_and_evaluate(model, training_data, training_labels, testing_data, testing_labels, name="BinCNN")

plotCosts(bin_cnn_loss, bin_cnn_accuracy, "BinCNN")

predictions = model.predict(testing_data)

for i in range(200):
print(testing_labels[i], round(predictions[i][0], 3))
plt.show()

print("Done!")

if __name__ == "__main__":
main()

0 comments on commit 683126a

Please sign in to comment.