
Commit f24edb2: minor fixes

gabrieleangeletti committed May 6, 2016
1 parent c1f92e9
Showing 5 changed files with 13 additions and 9 deletions.
command_line/run_logistic_regression.py (3 changes: 2 additions & 1 deletion)
@@ -20,6 +20,7 @@
flags.DEFINE_string('test_labels', '', 'Path to test labels .npy file.')
flags.DEFINE_string('cifar_dir', '', 'Path to the cifar 10 dataset directory.')
flags.DEFINE_string('main_dir', 'lr/', 'Directory to store data relative to the algorithm.')
+flags.DEFINE_string('model_name', 'logreg', 'Name for the model.')
flags.DEFINE_string('loss_func', 'cross_entropy', 'Loss function. ["mean_squared" or "cross_entropy"]')
flags.DEFINE_integer('verbose', 0, 'Level of verbosity. 0 - silent, 1 - print accuracy.')
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
@@ -79,7 +80,7 @@ def load_from_np(dataset_path):
teY = None

# Create the object
-l = logistic_regression.LogisticRegression(
+l = logistic_regression.LogisticRegression(model_name=FLAGS.model_name,
dataset=FLAGS.dataset, loss_func=FLAGS.loss_func, main_dir=FLAGS.main_dir, verbose=FLAGS.verbose,
learning_rate=FLAGS.learning_rate, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size)

command_line/run_stacked_autoencoder_supervised.py (3 changes: 2 additions & 1 deletion)
@@ -24,6 +24,7 @@
flags.DEFINE_string('save_layers_output', '', 'Path to a .npy file to save output from all the layers of the model.')
flags.DEFINE_boolean('restore_previous_model', False, 'If true, restore previous model corresponding to model name.')
flags.DEFINE_integer('seed', -1, 'Seed for the random generators (>= 0). Useful for testing hyperparameters.')
+flags.DEFINE_string('model_name', 'sdae', 'Name for the model.')

# Supervised fine tuning parameters
flags.DEFINE_string('finetune_loss_func', 'cross_entropy', 'Last Layer Loss function.["cross_entropy", "mean_squared"]')
@@ -134,7 +135,7 @@ def load_from_np(dataset_path):
sdae = None

sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
-do_pretrain=FLAGS.do_pretrain,
+do_pretrain=FLAGS.do_pretrain, model_name=FLAGS.model_name,
layers=dae_params['layers'], finetune_loss_func=FLAGS.finetune_loss_func,
finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size, dropout=FLAGS.dropout,
command_line/run_stacked_autoencoder_unsupervised.py (6 changes: 3 additions & 3 deletions)
@@ -26,7 +26,7 @@
flags.DEFINE_string('encweights', None, 'Path to a npz array containing the weights of the encoding layers.')
flags.DEFINE_string('encbiases', None, 'Path to a npz array containing the encoding layers biases.')
flags.DEFINE_boolean('restore_previous_model', False, 'If true, restore previous model corresponding to model name.')
-flags.DEFINE_integer('seed', -1, 'Seed for the random generators (>= 0). Useful for testing hyperparameters.')
+flags.DEFINE_string('model_name', 'un_sdae', 'Name for the model.')

# Supervised fine tuning parameters
flags.DEFINE_string('finetune_loss_func', 'cross_entropy', 'Last Layer Loss function.["cross_entropy", "mean_squared"]')
@@ -38,7 +38,7 @@
flags.DEFINE_string('finetune_opt', 'gradient_descent', '["gradient_descent", "ada_grad", "momentum"]')
flags.DEFINE_integer('finetune_batch_size', 20, 'Size of each mini-batch for the fine-tuning phase.')
flags.DEFINE_integer('verbose', 0, 'Level of verbosity. 0 - silent, 1 - print accuracy.')
-flags.DEFINE_string('main_dir', 'sdae/', 'Directory to store data relative to the algorithm.')
+flags.DEFINE_string('main_dir', 'un_sdae/', 'Directory to store data relative to the algorithm.')
flags.DEFINE_string('corr_type', 'none', 'Type of input corruption. ["none", "masking", "salt_and_pepper"]')
flags.DEFINE_float('corr_frac', 0.0, 'Fraction of the input to corrupt.')
# Autoencoder layers specific parameters
@@ -149,7 +149,7 @@ def load_from_np(dataset_path):
sdae = None

sdae = stacked_deep_autoencoder.StackedDeepAutoencoder(
-do_pretrain=FLAGS.do_pretrain,
+do_pretrain=FLAGS.do_pretrain, model_name=FLAGS.model_name,
layers=dae_params['layers'], finetune_loss_func=FLAGS.finetune_loss_func,
finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size, dropout=FLAGS.dropout,
model.py (5 changes: 3 additions & 2 deletions)
@@ -98,8 +98,9 @@ def _create_cost_function_node(self, loss_func, model_output, ref_input):

with tf.name_scope("cost"):
if loss_func == 'cross_entropy':
-cost = - tf.reduce_mean(ref_input * tf.log(model_output) +
-(1 - ref_input) * tf.log(1 - model_output))
+# cost = - tf.reduce_mean(ref_input * tf.log(model_output) +
+# (1 - ref_input) * tf.log(1 - model_output))
+cost = tf.reduce_mean(-tf.reduce_sum(ref_input * tf.log(model_output), reduction_indices=[1]))
_ = tf.scalar_summary("cross_entropy", cost)
self.cost = cost

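Note on the cost change above: the old expression treats every output unit as an independent binary target and averages the element-wise cross-entropy over all entries, while the new expression sums the cross-entropy over the output units of each sample and then averages over the batch, which is the usual categorical cross-entropy for softmax outputs. A minimal NumPy sketch with made-up values (not from this repository) shows how the two aggregations differ:

import numpy as np

# Made-up softmax outputs for a batch of two samples over three classes,
# together with their one-hot reference labels.
model_output = np.array([[0.7, 0.2, 0.1],
                         [0.1, 0.8, 0.1]])
ref_input = np.array([[1., 0., 0.],
                      [0., 1., 0.]])

# Old cost: element-wise binary cross-entropy, averaged over every entry.
old_cost = -np.mean(ref_input * np.log(model_output) +
                    (1 - ref_input) * np.log(1 - model_output))

# New cost: cross-entropy summed over the class axis per sample, then averaged
# over the batch (the NumPy analogue of tf.reduce_sum(..., reduction_indices=[1])).
new_cost = np.mean(-np.sum(ref_input * np.log(model_output), axis=1))

print(old_cost, new_cost)  # the two aggregations give different values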
models/autoencoder_models/stacked_deep_autoencoder.py (5 changes: 3 additions & 2 deletions)
@@ -81,8 +81,9 @@ def __init__(self, layers, model_name='un_sdae', main_dir='un_sdae/', enc_act_fu
self.autoencoders = []

for l, layer in enumerate(layers):
+autoencoder_name = self.model_name + '-dae-' + str(l+1)
self.autoencoders.append(denoising_autoencoder.DenoisingAutoencoder(
-n_components=layer, main_dir=self.main_dir,
+n_components=layer, main_dir=self.main_dir, model_name=autoencoder_name,
enc_act_func=self.enc_act_func[l], dec_act_func=self.dec_act_func[l], loss_func=self.loss_func[l],
xavier_init=self.xavier_init[l], opt=self.opt[l], learning_rate=self.learning_rate[l],
momentum=self.momentum[l], corr_type=self.corr_type, corr_frac=self.corr_frac,
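With the added autoencoder_name, each layer-wise denoising autoencoder in the stack gets a distinct name derived from the stack's model_name, presumably so the per-layer models can be saved and restored without overwriting one another. A small sketch of the names this expression generates, using hypothetical layer sizes:

model_name = 'un_sdae'    # default value of the model_name flag
layers = [512, 256, 128]  # hypothetical layer sizes

names = [model_name + '-dae-' + str(l + 1) for l, _ in enumerate(layers)]
print(names)  # ['un_sdae-dae-1', 'un_sdae-dae-2', 'un_sdae-dae-3']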
@@ -363,7 +364,7 @@ def _create_decoding_layers(self, last_encode):

next_decode = last_encode

-for l, layer in enumerate(reversed(self.layers)):
+for l, layer in reversed(list(enumerate(self.layers))):

with tf.name_scope("decode-{}".format(l)):

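The loop change in _create_decoding_layers keeps each layer's original index while iterating in reverse, so the "decode-{}" name scopes refer to the position of the layer being decoded instead of being renumbered from zero. A short sketch with hypothetical layer sizes shows the difference between the two iteration orders:

layers = [512, 256, 128]  # hypothetical encoder layer sizes

# Old loop: indices are renumbered while walking backwards.
print(list(enumerate(reversed(layers))))        # [(0, 128), (1, 256), (2, 512)]

# New loop: each layer keeps its original index.
print(list(reversed(list(enumerate(layers)))))  # [(2, 128), (1, 256), (0, 512)]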
