fail without crashing when summary writer is not available
gabrieleangeletti committed May 16, 2016
1 parent 55b557e commit 4cf6ba7
Showing 27 changed files with 99 additions and 195 deletions.
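The substance of the commit is a graceful-degradation pattern: each epoch's validation pass tries to run the merged TensorFlow summaries together with the metric, and if the summary machinery raises, it falls back to computing the metric alone instead of crashing the training run. A minimal sketch of the pattern as it lands in yadlt/core/model.py, written against the TensorFlow 0.x API the repo targets; `tf_session`, `tf_merged_summaries`, `cost`, `tf_summary_writer`, and `verbose` are attributes the model is assumed to have set up elsewhere:

```python
import tensorflow as tf

def run_validation_step(model, epoch, feed):
    """Try to log summaries; degrade to a bare cost computation on failure."""
    try:
        # One session.run for both the serialized summaries and the cost.
        summary_str, err = model.tf_session.run(
            [model.tf_merged_summaries, model.cost], feed_dict=feed)
        model.tf_summary_writer.add_summary(summary_str, epoch)
    except tf.errors.InvalidArgumentError:
        # Summary ops could not be evaluated; keep training anyway.
        print("Summary writer not available at the moment")
        err = model.tf_session.run(model.cost, feed_dict=feed)
    if model.verbose == 1:
        print("Reconstruction loss at step %s: %s" % (epoch, err))
```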
1 change: 0 additions & 1 deletion command_line/run_autoencoder.py
@@ -126,7 +126,6 @@ def load_from_np(dataset_path):
 if FLAGS.v_bias:
     bv = np.load(FLAGS.v_bias)

-dae.build_model(trX.shape[1], W, bh, bv)
 dae.fit(trX, teX, restore_previous_model=FLAGS.restore_previous_model)

 # Save the model parameters
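A side effect visible throughout the command-line scripts: the explicit build_model call before fit is gone, because graph construction now happens inside fit itself (see the model diffs below). A hedged sketch of the new calling convention; the data arrays are stand-ins, and the class name and constructor arguments are assumptions based on the file layout, not verified against the script's flags:

```python
import numpy as np
from yadlt.models.autoencoder_models.denoising_autoencoder import DenoisingAutoencoder

trX = np.random.rand(500, 784).astype(np.float32)  # stand-in training set
teX = np.random.rand(100, 784).astype(np.float32)  # stand-in validation set

dae = DenoisingAutoencoder(n_components=256)  # constructor args assumed

# Before: dae.build_model(trX.shape[1], W, bh, bv), then dae.fit(...).
# After: fit() sizes and builds the graph from the data it receives.
dae.fit(trX, teX, restore_previous_model=False)
```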
5 changes: 2 additions & 3 deletions command_line/run_conv_net.py
@@ -100,8 +100,7 @@ def load_from_np(dataset_path):

 # Model training
 print('Start Convolutional Network training...')
-convnet.build_model(trX.shape[1], trY.shape[1], [int(i) for i in FLAGS.original_shape.split(',')])
-convnet.fit(trX, trY, vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)
+convnet.fit(trX, trY, [int(i) for i in FLAGS.original_shape.split(',')], vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)

 # Test the model
-print('Test set accuracy: {}'.format(convnet.predict(teX, teY)))
+print('Test set accuracy: {}'.format(convnet.compute_accuracy(teX, teY)))
1 change: 0 additions & 1 deletion command_line/run_dbn.py
@@ -129,7 +129,6 @@ def load_from_np(dataset_path):

 # finetuning
 print('Start deep belief net finetuning...')
-srbm.build_model(trX.shape[1], trY.shape[1])
 srbm.fit(trX, trY, vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)

 # Test the model
13 changes: 1 addition & 12 deletions command_line/run_deep_autoencoder.py
@@ -30,8 +30,6 @@
 flags.DEFINE_string('main_dir', 'srbm/', 'Directory to store data relative to the algorithm.')
 flags.DEFINE_float('momentum', 0.7, 'Momentum parameter.')
 flags.DEFINE_string('save_reconstructions', '', 'Path to a .npy file to save the reconstructions of the model.')
-flags.DEFINE_string('encweights', None, 'Path to a npz array containing the weights of the encoding layers.')
-flags.DEFINE_string('encbiases', None, 'Path to a npz array containing the encoding layers biases.')

 # RBMs layers specific parameters
 flags.DEFINE_string('rbm_names', 'rbm', 'Name for the rbm stored_models.')
@@ -147,19 +145,10 @@ def load_params_npz(npzfilepath):
         params.append(npzfile[f])
     return params

-
-encodingw = None
-encodingb = None
-
 # Fit the model (unsupervised pretraining)
-if FLAGS.encweights and FLAGS.encbiases:
-    encodingw = load_params_npz(FLAGS.encweights)
-    encodingb = load_params_npz(FLAGS.encbiases)
-elif FLAGS.do_pretrain:
+if FLAGS.do_pretrain:
     encoded_X, encoded_vX = srbm.pretrain(trX, vlX)

 # Supervised finetuning
-srbm.build_model(trX.shape[1], encodingw, encodingb)
 srbm.fit(trX, trRef, vlX, vlRef, restore_previous_model=FLAGS.restore_previous_model)

 # Compute the reconstruction loss of the model
3 changes: 1 addition & 2 deletions command_line/run_logistic_regression.py
@@ -94,8 +94,7 @@ def load_from_np(dataset_path):
                        batch_size=FLAGS.batch_size)

 # Fit the model
-l.build_model(trX.shape[1], trY.shape[1])
 l.fit(trX, trY, vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)

 # Test the model
-print('Test set accuracy: {}'.format(l.predict(teX, teY)))
+print('Test set accuracy: {}'.format(l.compute_accuracy(teX, teY)))
1 change: 0 additions & 1 deletion command_line/run_rbm.py
@@ -99,7 +99,6 @@ def load_from_np(dataset_path):

 # Fit the model
 print('Start training...')
-r.build_model(trX.shape[1])
 r.fit(trX, teX, restore_previous_model=FLAGS.restore_previous_model)

 # Save the model parameters
2 changes: 0 additions & 2 deletions command_line/run_stacked_autoencoder_supervised.py
@@ -151,8 +151,6 @@ def load_from_np(dataset_path):
     encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

 # Supervised finetuning
-sdae.build_model(trX.shape[1], trY.shape[1])
-
 sdae.fit(trX, trY, vlX, vlY, restore_previous_model=FLAGS.restore_previous_model)

 # Compute the accuracy of the model
8 changes: 1 addition & 7 deletions command_line/run_stacked_autoencoder_unsupervised.py
@@ -27,8 +27,6 @@
 flags.DEFINE_string('save_reconstructions', '', 'Path to a .npy file to save the reconstructions of the model.')
 flags.DEFINE_string('save_layers_output_test', '', 'Path to a .npy file to save test set output from all the layers of the model.')
 flags.DEFINE_string('save_layers_output_train', '', 'Path to a .npy file to save train set output from all the layers of the model.')
-flags.DEFINE_string('encweights', None, 'Path to a npz array containing the weights of the encoding layers.')
-flags.DEFINE_string('encbiases', None, 'Path to a npz array containing the encoding layers biases.')
 flags.DEFINE_boolean('restore_previous_model', False, 'If true, restore previous model corresponding to model name.')
 flags.DEFINE_string('model_name', 'un_sdae', 'Name for the model.')
 flags.DEFINE_string('main_dir', 'un_sdae/', 'Directory to store data relative to the algorithm.')
@@ -174,14 +172,10 @@ def load_params_npz(npzfilepath):
 encodingb = None

 # Fit the model (unsupervised pretraining)
-if FLAGS.encweights and FLAGS.encbiases:
-    encodingw = load_params_npz(FLAGS.encweights)
-    encodingb = load_params_npz(FLAGS.encbiases)
-elif FLAGS.do_pretrain:
+if FLAGS.do_pretrain:
     encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

 # Supervised finetuning
-sdae.build_model(trX.shape[1], encodingw, encodingb)
 sdae.fit(trX, trRef, vlX, vlRef, restore_previous_model=FLAGS.restore_previous_model)

 # Compute the reconstruction loss of the model
2 changes: 2 additions & 0 deletions models/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "sdae"
+all_model_checkpoint_paths: "sdae"
2 changes: 2 additions & 0 deletions models/dae-1/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "sdae-dae-1"
+all_model_checkpoint_paths: "sdae-dae-1"
Binary file added models/dae-1/sdae-dae-1
Binary file added models/dae-1/sdae-dae-1.meta
2 changes: 2 additions & 0 deletions models/dae-2/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "sdae-dae-2"
+all_model_checkpoint_paths: "sdae-dae-2"
Binary file added models/dae-2/sdae-dae-2
Binary file added models/dae-2/sdae-dae-2.meta
Binary file added models/sdae
Binary file added models/sdae.meta
4 changes: 2 additions & 2 deletions setup.py
@@ -2,9 +2,9 @@

 setup(
     name='yadlt',
-    version='0.0.1rc-2',
+    version='0.0.1rc-3',
     url='https://github.com/blackecho/Deep-Learning-Tensorflow',
-    download_url='https://github.com/blackecho/Deep-Learning-TensorFlow/tarball/0.0.1rc-2',
+    download_url='https://github.com/blackecho/Deep-Learning-TensorFlow/tarball/0.0.1rc-3',
     author='Gabriele Angeletti',
     author_email='[email protected]',
     description='Implementation of various deep learning algorithms using Tensorflow. Class interfaces are sklearn-like.',
43 changes: 43 additions & 0 deletions yadlt/core/model.py
@@ -35,6 +35,7 @@ def __init__(self, model_name, main_dir, models_dir, data_dir, summary_dir):
         self.last_out = None
         self.train_step = None
         self.cost = None
+        self.verbose = 0

         # tensorflow objects
         self.tf_session = None
@@ -110,6 +111,48 @@ def _initialize_training_parameters(self, loss_func, learning_rate, num_epochs,
         self.momentum = momentum
         self.l2reg = l2reg

+    def _run_unsupervised_validation_error_and_summaries(self, epoch, feed):
+
+        """ Run the summaries and error computation on the validation set.
+        :param epoch: current epoch
+        :param feed: feed dictionary for the validation data
+        :return: self
+        """
+
+        try:
+            result = self.tf_session.run([self.tf_merged_summaries, self.cost], feed_dict=feed)
+            summary_str = result[0]
+            err = result[1]
+            self.tf_summary_writer.add_summary(summary_str, epoch)
+        except tf.errors.InvalidArgumentError:
+            print("Summary writer not available at the moment")
+            result = self.tf_session.run([self.cost], feed_dict=feed)
+            err = result[0]
+
+        if self.verbose == 1:
+            print("Reconstruction loss at step %s: %s" % (epoch, err))
+
+    def _run_supervised_validation_error_and_summaries(self, epoch, feed):
+
+        """ Run the summaries and error computation on the validation set.
+        :param epoch: current epoch
+        :param feed: feed dictionary for the validation data
+        :return: self
+        """
+
+        try:
+            result = self.tf_session.run([self.tf_merged_summaries, self.accuracy], feed_dict=feed)
+            summary_str = result[0]
+            acc = result[1]
+            self.tf_summary_writer.add_summary(summary_str, epoch)
+        except tf.errors.InvalidArgumentError:
+            print("Summary writer not available at the moment")
+            result = self.tf_session.run([self.accuracy], feed_dict=feed)
+            acc = result[0]
+
+        if self.verbose == 1:
+            print("Accuracy at step %s: %s" % (epoch, acc))
+
     def predict(self, test_set):

         """ Predict the labels for the test set.
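With these two helpers on the base Model, each subclass only has to assemble its own feed dictionary and delegate, which is what the per-model diffs below do. A sketch of the resulting shape of a subclass training loop; attribute names follow the diffs, while `num_epochs` and `_run_train_step` are assumed from the base class and the subclass respectively:

```python
def _train_model(self, train_set, train_labels, validation_set, validation_labels):
    """Per-epoch loop: train, then validate via the shared, crash-safe helper."""
    for i in range(self.num_epochs):
        self._run_train_step(train_set, train_labels)
        if validation_set is not None:
            # Subclass-specific feed; the base class handles summaries and metrics.
            feed = {self.input_data: validation_set,
                    self.input_labels: validation_labels,
                    self.keep_prob: 1}
            self._run_supervised_validation_error_and_summaries(i, feed)
```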
23 changes: 2 additions & 21 deletions yadlt/models/autoencoder_models/denoising_autoencoder.py
@@ -78,7 +78,8 @@ def _train_model(self, train_set, validation_set):

             # if i % 5 == 0:
             if validation_set is not None:
-                self._run_validation_error_and_summaries(i, validation_set)
+                feed = {self.input_data: validation_set, self.input_data_corr: validation_set}
+                self._run_unsupervised_validation_error_and_summaries(i, feed)

     def _run_train_step(self, train_set):

@@ -120,26 +121,6 @@ def _corrupt_input(self, data):
         else:
             return np.copy(data)

-    def _run_validation_error_and_summaries(self, epoch, validation_set):
-
-        """ Run the summaries and error computation on the validation set.
-        :param epoch: current epoch
-        :param validation_set: validation data
-        :return: self
-        """
-
-        vl_feed = {self.input_data: validation_set, self.input_data_corr: validation_set}
-        result = self.tf_session.run([self.tf_merged_summaries, self.cost], feed_dict=vl_feed)
-        summary_str = result[0]
-        err = result[1]
-
-        self.tf_summary_writer.add_summary(summary_str, epoch)
-
-        if self.verbose == 1:
-            print("Validation cost at step %s: %s" % (epoch, err))
-
     def reconstruct(self, data):

         """ Reconstruct the test set data using the learned model.
25 changes: 4 additions & 21 deletions yadlt/models/autoencoder_models/stacked_deep_autoencoder.py
@@ -15,7 +15,7 @@ class StackedDeepAutoencoder(model.Model):
     """

     def __init__(self, dae_layers, model_name='sdae', main_dir='sdae/', models_dir='models/', data_dir='data/', summary_dir='logs/',
-                 dae_enc_act_func=list(['tanh']), dae_dec_act_func=list(['none']), dae_loss_func=list(['mean_squared']), dae_num_epochs=list([10]),
+                 dae_enc_act_func=list(['tanh']), dae_dec_act_func=list(['none']), dae_loss_func=list(['cross_entropy']), dae_num_epochs=list([10]),
                  dae_batch_size=list([10]), dataset='mnist', dae_opt=list(['gradient_descent']),
                  dae_learning_rate=list([0.01]), momentum=0.5, finetune_dropout=1, dae_corr_type=list(['none']),
                  dae_corr_frac=list([0.]), verbose=1, finetune_loss_func='cross_entropy', finetune_enc_act_func='relu',
@@ -145,6 +145,7 @@ def fit(self, train_set, train_ref, validation_set=None, validation_ref=None, re
         print('Starting Reconstruction finetuning...')

         with tf.Session() as self.tf_session:
+            self.build_model(train_set.shape[1])
             self._initialize_tf_utilities_and_ops(restore_previous_model)
             self._train_model(train_set, train_ref, validation_set, validation_ref)
             self.tf_saver.save(self.tf_session, self.model_path)
@@ -173,26 +174,8 @@ def _train_model(self, train_set, train_ref, validation_set, validation_ref):
                                           self.keep_prob: self.dropout})

             if validation_set is not None:
-                self._run_validation_error_and_summaries(i, validation_set, validation_ref)
-
-    def _run_validation_error_and_summaries(self, epoch, validation_set, validation_ref):
-
-        """ Run the summaries and error computation on the validation set.
-        :param epoch: current epoch
-        :param validation_ref: validation reference data
-        :return: self
-        """
-
-        feed = {self.input_data: validation_set, self.input_labels: validation_ref, self.keep_prob: 1}
-        result = self.tf_session.run([self.tf_merged_summaries, self.cost], feed_dict=feed)
-
-        summary_str = result[0]
-        acc = result[1]
-
-        self.tf_summary_writer.add_summary(summary_str, epoch)
-
-        if self.verbose == 1:
-            print("Reconstruction loss at step %s: %s" % (epoch, acc))
+                feed = {self.input_data: validation_set, self.input_labels: validation_ref, self.keep_prob: 1}
+                self._run_unsupervised_validation_error_and_summaries(i, feed)

     def get_layers_output(self, dataset):

25 changes: 4 additions & 21 deletions yadlt/models/autoencoder_models/stacked_denoising_autoencoder.py
@@ -15,7 +15,7 @@ class StackedDenoisingAutoencoder(model.Model):
     """

     def __init__(self, dae_layers, model_name='sdae', main_dir='sdae/', models_dir='models/', data_dir='data/', summary_dir='logs/',
-                 dae_enc_act_func=list(['tanh']), dae_dec_act_func=list(['none']), dae_loss_func=list(['mean_squared']), dae_num_epochs=list([10]),
+                 dae_enc_act_func=list(['tanh']), dae_dec_act_func=list(['none']), dae_loss_func=list(['cross_entropy']), dae_num_epochs=list([10]),
                  dae_batch_size=list([10]), dataset='mnist', dae_opt=list(['gradient_descent']), dae_l2reg=list([5e-4]),
                  dae_learning_rate=list([0.01]), momentum=0.5, finetune_dropout=1, dae_corr_type=list(['none']),
                  dae_corr_frac=list([0.]), verbose=1, finetune_loss_func='softmax_cross_entropy', finetune_act_func='relu',
@@ -143,6 +143,7 @@ def fit(self, train_set, train_labels, validation_set=None, validation_labels=No
         print('Starting Supervised finetuning...')

         with tf.Session() as self.tf_session:
+            self.build_model(train_set.shape[1], train_labels.shape[1])
             self._initialize_tf_utilities_and_ops(restore_previous_model)
             self._train_model(train_set, train_labels, validation_set, validation_labels)
             self.tf_saver.save(self.tf_session, self.model_path)
@@ -171,26 +172,8 @@ def _train_model(self, train_set, train_labels, validation_set, validation_label
                                           self.keep_prob: self.dropout})

             if validation_set is not None:
-                self._run_validation_error_and_summaries(i, validation_set, validation_labels)
-
-    def _run_validation_error_and_summaries(self, epoch, validation_set, validation_labels):
-
-        """ Run the summaries and error computation on the validation set.
-        :param epoch: current epoch
-        :param validation_set: validation data
-        :return: self
-        """
-
-        feed = {self.input_data: validation_set, self.input_labels: validation_labels, self.keep_prob: 1}
-        result = self.tf_session.run([self.tf_merged_summaries, self.accuracy], feed_dict=feed)
-
-        summary_str = result[0]
-        acc = result[1]
-
-        self.tf_summary_writer.add_summary(summary_str, epoch)
-
-        if self.verbose == 1:
-            print("Accuracy at step %s: %s" % (epoch, acc))
+                feed = {self.input_data: validation_set, self.input_labels: validation_labels, self.keep_prob: 1}
+                self._run_supervised_validation_error_and_summaries(i, feed)

     def get_layers_output(self, dataset):

31 changes: 15 additions & 16 deletions yadlt/models/convolutional_models/conv_net.py
@@ -41,11 +41,12 @@ def __init__(self, layers, model_name='convnet', main_dir='convnet', models_dir=

         self.accuracy = None

-    def fit(self, train_set, train_labels, validation_set=None, validation_labels=None, restore_previous_model=False):
+    def fit(self, train_set, train_labels, original_shape, validation_set=None, validation_labels=None, restore_previous_model=False):

         """ Fit the model to the data.
         :param train_set: Training data. shape(n_samples, n_features)
         :param train_labels: Labels for the data. shape(n_samples, n_classes)
+        :param original_shape: original shape of the images in the dataset
         :param validation_set: optional, default None. Validation data. shape(nval_samples, n_features)
         :param validation_labels: optional, default None. Labels for the validation data. shape(nval_samples, n_classes)
         :param restore_previous_model:
@@ -57,6 +58,7 @@ def fit(self, train_set, train_labels, validation_set=None, validation_labels=No
         print('Starting training...')

         with tf.Session() as self.tf_session:
+            self.build_model(train_set.shape[1], train_labels.shape[1], original_shape)
             self._initialize_tf_utilities_and_ops(restore_previous_model)
             self._train_model(train_set, train_labels, validation_set, validation_labels)
             self.tf_saver.save(self.tf_session, self.model_path)
@@ -85,25 +87,22 @@ def _train_model(self, train_set, train_labels, validation_set, validation_label
                                           self.keep_prob: self.dropout})

             if validation_set is not None:
-                self._run_validation_error_and_summaries(i, validation_set, validation_labels)
+                feed = {self.input_data: validation_set, self.input_labels: validation_labels, self.keep_prob: 1}
+                self._run_supervised_validation_error_and_summaries(i, feed)

-    def _run_validation_error_and_summaries(self, epoch, validation_set, validation_labels):
+    def compute_accuracy(self, test_set, test_labels):

-        """ Run the summaries and error computation on the validation set.
-        :param epoch: current epoch
-        :param validation_set: validation data
-        :return: self
+        """ Compute the accuracy over the test set.
+        :param test_set: Testing data. shape(n_test_samples, n_features)
+        :param test_labels: Labels for the test data. shape(n_test_samples, n_classes)
+        :return: accuracy
         """

-        feed = {self.input_data: validation_set, self.input_labels: validation_labels, self.keep_prob: 1}
-        result = self.tf_session.run([self.tf_merged_summaries, self.accuracy], feed_dict=feed)
-        summary_str = result[0]
-        acc = result[1]
-
-        self.tf_summary_writer.add_summary(summary_str, epoch)
-
-        if self.verbose == 1:
-            print("Accuracy at step %s: %s" % (epoch, acc))
+        with tf.Session() as self.tf_session:
+            self.tf_saver.restore(self.tf_session, self.model_path)
+            return self.accuracy.eval({self.input_data: test_set,
+                                       self.input_labels: test_labels,
+                                       self.keep_prob: 1})

     def build_model(self, n_features, n_classes, original_shape):

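Callers of the convolutional network see both changes at once: fit now takes the image shape (so it can call build_model itself), and accuracy moves from predict to the new compute_accuracy, which restores the saved model in a fresh session. A usage sketch assuming MNIST-shaped numpy arrays and an already-constructed network object from conv_net.py, here called `convnet`:

```python
# trX/trY, vlX/vlY, teX/teY are assumed numpy arrays (flattened 28x28 images).
original_shape = [28, 28, 1]  # height, width, channels

# fit() now receives the image shape and builds the graph internally.
convnet.fit(trX, trY, original_shape, vlX, vlY, restore_previous_model=False)

# compute_accuracy() reopens a session and restores the trained weights.
print('Test set accuracy: {}'.format(convnet.compute_accuracy(teX, teY)))
```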
(Diff truncated: the remaining changed files are not shown in this view.)
