Skip to content

Commit

Permalink
add training code, cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
crizCraig committed Jan 5, 2017
1 parent 8fd3f2e commit 1e48ed6
Show file tree
Hide file tree
Showing 8 changed files with 396 additions and 44 deletions.
42 changes: 16 additions & 26 deletions drivers/deepdrive_tf/deep_driver_tf.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,49 +31,40 @@ def __init__(self):
self.max_outputs = np.array([-10., -10., -10., -10., -10., -10.])

def load_net(self):
    """Build the GTANet graph and restore weights from the bundled checkpoint.

    Side effects: sets self.image_var (input placeholder), self.net (the
    model under variable scope "model"), and self.sess (a live tf.Session
    with the checkpoint restored).
    """
    # NOTE(review): this block was scraped from a rendered diff — the
    # import_meta_graph line below is the *removed* side of the hunk and the
    # placeholder/GTANetModel/Saver lines are its replacement; both appear here.
    saver = tf.train.import_meta_graph(os.path.join(DIR_PATH, 'model.ckpt-20048.meta'))
    # Placeholder shaped (batch,) + self.image_shape; batch dimension is dynamic.
    self.image_var = tf.placeholder(tf.float32, (None,) + self.image_shape)
    with tf.variable_scope("model") as vs:
        # is_training=False — the dropout layers in GTANetModel are skipped at inference.
        self.net = GTANetModel(self.image_var, 6, is_training=False)
    saver = tf.train.Saver()
    self.sess = tf.Session()

    # self.image_var = tf.placeholder(tf.float32, (None,) + self.image_shape)
    # self.net_out_var = tf.placeholder(tf.float32, (None, self.num_targets))
    # self.sess.run(tf.initialize_all_variables())

    # Restore trained weights; checkpoint prefix must match the .meta file above.
    saver.restore(self.sess, os.path.join(DIR_PATH, 'model.ckpt-20048'))
    pass

    # self.net = GTANetModel(self.image_var, is_training=False)

def get_next_action(self, net_out, info):
spin, direction, speed, speed_change, steer, throttle = net_out[0]
steer = -float(steer)
steer -= 0.20
print(steer)
# throttle = -float(throttle)
# speed += 1.0
steer_dead_zone = 0.2
self.max_outputs = np.max(np.array([self.max_outputs, net_out[0]]).T, axis=1)
self.min_outputs = np.min(np.array([self.min_outputs, net_out[0]]).T, axis=1)

print('max outputs', self.max_outputs)
print('min outputs', self.min_outputs)

# Add dead zones
if steer > 0:
steer += steer_dead_zone
steer += 0.2
elif steer < 0:
steer -= steer_dead_zone
steer -= 0.3

logger.debug('steer %f', steer)
print('control tf')
print(' steer %f' % steer)
x_axis_event = JoystickAxisXEvent(steer)
if 'n' in info and 'speed' in info['n'][0]:
current_speed = info['n'][0]['speed']
desired_speed = speed * 20. # Denormalize per deep_drive.h in deepdrive-caffe
desired_speed = speed / 0.05 # Denormalize per deep_drive.h in deepdrive-caffe
if desired_speed < current_speed:
logger.debug('braking')
throttle = self.throttle - (current_speed - desired_speed) * 0.085 # Magic number
throttle = max(throttle, 0.0)
print(' throttle %s' % throttle)
print(' braking: true')
else:
throttle += 13. / 50. # Joystick dead zone
print(' throttle %s' % throttle)
print(' braking false')

z_axis_event = JoystickAxisZEvent(float(throttle))
logging.debug('throttle %s', throttle)
Expand All @@ -85,8 +76,7 @@ def get_next_action(self, net_out, info):
self.throttle = throttle
self.steer = steer

# return self.get_net_out()
return self.get_noop()
return next_action_n

def set_input(self, img):
img = imresize(img, self.image_shape)
Expand All @@ -96,8 +86,8 @@ def set_input(self, img):

def get_net_out(self):
    """Run one forward pass on the currently-set image and return the raw net output.

    Reshapes self.image (assumed 227x227x3 — set by set_input) to a batch of 1.
    Returns the session's output array (shape (1, num_targets) per the model's
    final layer — TODO confirm against GTANetModel).
    """
    begin = time.time()
    # NOTE(review): scraped diff — the first sess.run (string tensor names) is
    # the *removed* side of the hunk; the second (self.net.p / self.image_var)
    # is the current code.
    net_out = self.sess.run('model/add_5:0', feed_dict={'Placeholder:0': self.image.reshape(1, 227, 227, 3)})
    print(net_out)
    net_out = self.sess.run(self.net.p, feed_dict={self.image_var: self.image.reshape(1, 227, 227, 3)})
    # print(net_out)
    end = time.time()
    # Wall-clock inference latency, for profiling the control loop.
    logger.debug('inference time %s', end - begin)
    return net_out
10 changes: 5 additions & 5 deletions drivers/deepdrive_tf/gtanet.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,13 @@ def __init__(self, x, num_targets=6, is_training=True):
conv5 = tf.nn.relu(conv2d(conv4, "conv5", 256, 3, 1, 2))
maxpool5 = max_pool_2x2(conv5)
fc6 = tf.nn.relu(linear(maxpool5, "fc6", 4096))
# if is_training:
# fc6 = tf.nn.dropout(fc6, 0.5)
if is_training:
fc6 = tf.nn.dropout(fc6, 0.5)
fc7 = tf.nn.relu(linear(fc6, "fc7", 4096))
# if is_training:
# fc7 = tf.nn.dropout(fc7, 0.95)
if is_training:
fc7 = tf.nn.dropout(fc7, 0.95)
fc8 = linear(fc7, "fc8", num_targets)

self.fc8 = fc8
self.p = fc8
self.global_step = tf.get_variable("global_step", [], tf.int32, initializer=tf.zeros_initializer,
trainable=False)
89 changes: 89 additions & 0 deletions drivers/deepdrive_tf/train/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# IPython Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# dotenv
.env

# virtualenv
venv/
ENV/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject
Empty file.
111 changes: 111 additions & 0 deletions drivers/deepdrive_tf/train/data_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
import json
import glob
import numpy as np
import os
import h5py
import random
import threading
import queue

class BackgroundGenerator(threading.Thread):
    """Iterate *generator* on a background thread, prefetching one item.

    A bounded queue of size 1 means at most one item is produced ahead of the
    consumer, overlapping (I/O-heavy) production with consumption.

    Fixes over the original:
    - A unique sentinel marks end-of-stream, so a generator that legitimately
      yields ``None`` no longer terminates iteration early.
    - Exceptions raised inside the wrapped generator are captured and re-raised
      in the consuming thread; previously the worker died silently and the
      consumer deadlocked forever on ``queue.get()``.
    """

    _DONE = object()  # private end-of-stream marker; cannot collide with real items

    def __init__(self, generator):
        threading.Thread.__init__(self)
        self.queue = queue.Queue(1)  # prefetch depth of exactly one item
        self.generator = generator
        self.daemon = True  # don't block interpreter exit if the consumer stops early
        self._error = None  # exception captured from the worker thread, if any
        self.start()

    def run(self):
        try:
            for item in self.generator:
                self.queue.put(item)
        except BaseException as exc:
            # Hold the error so __next__ can re-raise it in the consumer thread.
            self._error = exc
        self.queue.put(self._DONE)

    def __iter__(self):
        return self

    def __next__(self):
        next_item = self.queue.get()
        if next_item is self._DONE:
            if self._error is not None:
                raise self._error
            raise StopIteration
        return next_item

def get_good_files(hdf5_path, train=True):
    """Return the set of HDF5 shard paths considered usable.

    train=True reads the shard ids from ``good_files.json`` in *hdf5_path* and
    excludes id 1; train=False uses id 1 only (so shard 1 serves as the
    held-out evaluation file).

    Raises KeyError-free but: json.load raises on malformed JSON, and
    FileNotFoundError if good_files.json is absent.
    """
    if train:
        # https://gist.github.com/crizCraig/65677883e07c74bdc08f987e806cd95f
        with open(os.path.join(hdf5_path, 'good_files.json'), encoding='utf8') as f:
            ids = json.load(f)
        # NOTE(review): raises ValueError if 1 is not listed in good_files.json
        # — confirm that is the intended failure mode.
        ids.remove(1)
    else:
        ids = [1]

    return {os.path.join(hdf5_path, 'train_%04d.h5' % i) for i in ids}

def load_file(h5_filename):
    """Read one HDF5 shard and return (images, targets) as parallel lists.

    Each image is converted from the Caffe CHW/BGR layout to HWC/RGB float32
    with a fixed per-channel mean subtracted.
    """
    mean_pixel = np.array([104., 117., 123.], np.float32)
    out_images = []
    out_targets = []

    with h5py.File(h5_filename, 'r') as hf:
        images = list(hf.get('images'))
        targets = list(hf.get('targets'))
        # The original built an identity permutation (np.arange) and indexed
        # through it sequentially — dead leftover from shuffling; iterate directly.
        for img, target in zip(images, targets):
            img = img.transpose((1, 2, 0))  # CHW => HWC
            img = img[:, :, ::-1]  # BGR => RGB
            img = img.astype(np.float32)
            # NOTE(review): [104, 117, 123] is the Caffe BGR channel mean, yet it
            # is subtracted *after* the RGB flip — confirm the intended channel
            # order matches the inference-side preprocessing.
            img -= mean_pixel
            out_images.append(img)
            out_targets.append(target)
    return out_images, out_targets

def file_loader(file_stream):
    """Generator: load each HDF5 filename arriving on *file_stream*.

    Yields the (images, targets) pair produced by load_file, logging each
    filename as it is consumed.
    """
    for filename in file_stream:
        print('input file: {}'.format(filename))
        yield load_file(filename)

def batch_gen(file_stream, batch_size):
    """Yield (image_batch, target_batch) pairs of exactly *batch_size* items.

    Files are loaded on a background thread via BackgroundGenerator; any
    trailing partial batch within a file is dropped.
    """
    prefetcher = BackgroundGenerator(file_loader(file_stream))
    for images, targets in prefetcher:
        full_batches = len(images) // batch_size
        for start in range(0, full_batches * batch_size, batch_size):
            stop = start + batch_size
            yield images[start:stop], targets[start:stop]

class Dataset(object):
    """A collection of HDF5 shard files with batched iteration helpers."""

    def __init__(self, files):
        # List of .h5 file paths; iterate_forever reshuffles it in place.
        self._files = files

    def iterate_once(self, batch_size):
        """One ordered pass over every file, yielding fixed-size batches."""
        yield from batch_gen(iter(self._files), batch_size)

    def iterate_forever(self, batch_size):
        """Endless epochs; the file order is reshuffled before each one."""
        def endless_stream():
            while True:
                random.shuffle(self._files)
                yield from self._files
        yield from batch_gen(endless_stream(), batch_size)


def get_dataset(hdf5_path, train=True):
    """Build a Dataset from the shards in *hdf5_path* that pass the good-files filter."""
    keep = get_good_files(hdf5_path, train=train)
    matches = glob.glob(hdf5_path + "/*.h5")
    return Dataset([name for name in matches if name in keep])

def run():
    """Smoke test: build the training Dataset from $DEEPDRIVE_HDF5_PATH and print it.

    Raises KeyError if the DEEPDRIVE_HDF5_PATH environment variable is unset.
    """
    # Bug fix: os.environ is a mapping, not a callable — the original
    # os.environ('DEEPDRIVE_HDF5_PATH') raised "TypeError: '_Environ' object
    # is not callable" at runtime.
    hdf5_path = os.environ['DEEPDRIVE_HDF5_PATH']

    # print(get_good_files(hdf5_path))
    dataset = get_dataset(hdf5_path)
    print(dataset)

if __name__ == "__main__":
    run()
43 changes: 43 additions & 0 deletions drivers/deepdrive_tf/train/layers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import numpy as np
import tensorflow as tf

def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
    '''From https://github.com/ethereon/caffe-tensorflow

    Grouped 2-D convolution with bias add.

    input:   NHWC tensor; its channel count c_i must divide evenly by group.
    kernel:  filter variable (its shape already encodes k_h/k_w — the k_h and
             k_w parameters are unused in this body).
    biases:  bias vector of length c_o.
    c_o:     number of output channels (must divide evenly by group).
    s_h/s_w: vertical/horizontal strides.
    group:   channel groups; each group is convolved independently and the
             results concatenated along the channel axis.
    '''
    c_i = input.get_shape()[-1]
    assert c_i % group == 0
    assert c_o % group == 0

    def convolve(i, k):
        return tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)

    # TODO: random weight initialization
    # W = tf.get_variable("W", shape=[784, 256],
    #                     initializer=tf.contrib.layers.xavier_initializer())

    if group == 1:
        conv = convolve(input, kernel)
    else:
        # NOTE(review): tf.split/tf.concat are called with the pre-TF-1.0
        # argument order (axis first) — needs updating for modern TensorFlow.
        input_groups = tf.split(3, group, input)
        kernel_groups = tf.split(3, group, kernel)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
        conv = tf.concat(3, output_groups)
    # Re-attach a static shape after bias_add (batch dim left dynamic as -1).
    return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])

def conv2d(x, name, num_features, kernel_size, stride, group):
    """Square-kernel, SAME-padded grouped convolution layer.

    Creates weight/bias variables named ``<name>_W`` / ``<name>_b`` and
    delegates the actual convolution to ``conv``.
    """
    in_channels = x.get_shape()[3]
    filter_shape = [kernel_size, kernel_size, in_channels // group, num_features]
    weights = tf.get_variable(name + "_W", filter_shape)
    biases = tf.get_variable(name + "_b", [num_features])
    return conv(x, weights, biases, kernel_size, kernel_size, num_features,
                stride, stride, padding="SAME", group=group)

def linear(x, name, size):
    """Fully connected layer: flatten ``x`` and compute ``x @ W + b``.

    Weights are drawn from N(0, 0.005); biases start at zero.
    """
    trailing_dims = [int(d) for d in x.get_shape()[1:]]
    input_size = np.prod(trailing_dims)
    flat = tf.reshape(x, [-1, input_size])
    weights = tf.get_variable(name + "_W", [input_size, size],
                              initializer=tf.random_normal_initializer(0.0, 0.005))
    biases = tf.get_variable(name + "_b", [size], initializer=tf.zeros_initializer)
    return tf.matmul(flat, weights) + biases

def max_pool_2x2(x):
    # NOTE(review): despite the name, this is overlapping pooling — a 3x3
    # window with stride 2 and VALID padding, not a 2x2 pool. The name is kept
    # because callers reference it.
    return tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')

def lrn(x):
    # Local response normalization with fixed hyperparameters
    # (depth_radius=2, alpha=2e-05, beta=0.75, bias=1.0).
    return tf.nn.local_response_normalization(x, depth_radius=2, alpha=2e-05, beta=0.75, bias=1.0)
Loading

0 comments on commit 1e48ed6

Please sign in to comment.