Merge pull request aymericdamien#85 from normanheckscher/master
begin refactor for TF1.0
aymericdamien authored Jan 18, 2017
2 parents 2f0c1ba + ab15e28 commit 9f93712
Showing 22 changed files with 695 additions and 692 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -98,7 +98,7 @@ The following examples are coming from [TFLearn](https://github.com/tflearn/tfle

## Dependencies
```
tensorflow
tensorflow 1.0alpha
numpy
matplotlib
cuda
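Note: the refactored examples assume a TensorFlow 1.0-series build. A minimal, illustrative sanity check of the installed version before running them:

```python
import tensorflow as tf

# The examples in this refactor assume a 1.0-series release
# (e.g. the 1.0 alpha or later); print the installed version to confirm.
print(tf.__version__)
```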
2 changes: 1 addition & 1 deletion examples/2_BasicModels/linear_regression.py
@@ -41,7 +41,7 @@
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
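The only change in this file is the initializer rename: TF 1.0 deprecates tf.initialize_all_variables() in favor of tf.global_variables_initializer(). A minimal sketch of the new pattern (the variable is illustrative):

```python
import tensorflow as tf

# TF 1.0 renames the op that initializes all global variables.
W = tf.Variable(tf.zeros([1]), name="weight")
init = tf.global_variables_initializer()  # was tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(W))  # [ 0.]
```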
2 changes: 1 addition & 1 deletion examples/2_BasicModels/logistic_regression.py
@@ -38,7 +38,7 @@
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
4 changes: 2 additions & 2 deletions examples/2_BasicModels/nearest_neighbor.py
@@ -26,14 +26,14 @@

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
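Two renames land here: tf.neg becomes tf.negative, plus the initializer change above. A small sketch of the L1-distance computation under the new name (placeholder shapes are illustrative):

```python
import tensorflow as tf

# L1 distance between one test vector and every training vector,
# using tf.negative (formerly tf.neg).
xtr = tf.placeholder(tf.float32, [None, 784])
xte = tf.placeholder(tf.float32, [784])
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                         reduction_indices=1)  # axis=1 is the newer spelling
pred = tf.arg_min(distance, 0)  # index of the nearest neighbor
```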
4 changes: 2 additions & 2 deletions examples/3_NeuralNetworks/autoencoder.py
@@ -17,7 +17,7 @@

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Parameters
learning_rate = 0.01
@@ -83,7 +83,7 @@ def decoder(x):
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
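This diff also moves the MNIST cache from /tmp/data/ to a local MNIST_data directory; read_data_sets downloads the files there on first use. A minimal sketch (the batch size is illustrative):

```python
from tensorflow.examples.tutorials.mnist import input_data

# Downloads MNIST into ./MNIST_data on the first run, then reads from the cache.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
batch_xs, batch_ys = mnist.train.next_batch(256)
print(batch_xs.shape, batch_ys.shape)  # (256, 784) (256, 10)
```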
16 changes: 8 additions & 8 deletions examples/3_NeuralNetworks/bidirectional_rnn.py
@@ -10,7 +10,7 @@
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
from tensorflow.contrib import rnn
import numpy as np

# Import MNIST data
@@ -60,20 +60,20 @@ def BiRNN(x, weights, biases):
# Reshape to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(0, n_steps, x)
x = tf.split(x, n_steps, 0)

# Define lstm cells with tensorflow
# Forward direction cell
lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Backward direction cell
lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

# Get lstm cell output
try:
outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
dtype=tf.float32)
except Exception: # Old TensorFlow version only returns outputs not states
outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
dtype=tf.float32)

# Linear activation, using rnn inner loop last output
@@ -82,15 +82,15 @@ def BiRNN(x, weights, biases):
pred = BiRNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
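This file collects several TF 1.0 changes at once: tf.split's argument order becomes (value, num_or_size_splits, axis), the RNN cells move from tensorflow.python.ops.rnn_cell to tf.contrib.rnn, and bidirectional_rnn gains a static_ prefix. A condensed sketch of the new call sequence (sizes are illustrative):

```python
import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# (batch, steps, input) -> list of n_steps tensors of shape (batch, input)
x_seq = tf.reshape(tf.transpose(x, [1, 0, 2]), [-1, n_input])
x_seq = tf.split(x_seq, n_steps, 0)  # TF 1.0 order: (value, num_splits, axis)

lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

# static_bidirectional_rnn returns (outputs, fw_state, bw_state) in TF 1.0
outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x_seq,
                                             dtype=tf.float32)
```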
4 changes: 2 additions & 2 deletions examples/3_NeuralNetworks/convolutional_network.py
@@ -96,15 +96,15 @@ def conv_net(x, weights, biases, dropout):
pred = conv_net(x, weights, biases, keep_prob)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
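As in the other models, the loss now passes its tensors by name: TF 1.0's softmax_cross_entropy_with_logits takes logits and labels as keyword arguments, so the two can no longer be swapped silently. A minimal sketch (placeholders are illustrative):

```python
import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])
labels = tf.placeholder(tf.float32, [None, 10])

# Keyword arguments are required in TF 1.0.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
```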
4 changes: 2 additions & 2 deletions examples/3_NeuralNetworks/multilayer_perceptron.py
@@ -60,11 +60,11 @@ def multilayer_perceptron(x, weights, biases):
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
12 changes: 6 additions & 6 deletions examples/3_NeuralNetworks/recurrent_network.py
@@ -10,7 +10,7 @@
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
from tensorflow.contrib import rnn

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
@@ -58,29 +58,29 @@ def RNN(x, weights, biases):
# Reshaping to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(0, n_steps, x)
x = tf.split(x, n_steps, 0)

# Define a lstm cell with tensorflow
lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

# Get lstm cell output
outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
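The same contrib.rnn and keyword-argument changes apply here; the one that is easy to miss is the tf.split reordering from (axis, num_splits, value) to (value, num_splits, axis). A tiny check of the TF 1.0 ordering (the constant values are illustrative):

```python
import tensorflow as tf

# TF 1.0: tf.split(value, num_or_size_splits, axis)
x = tf.constant([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])  # shape (4, 2)
pieces = tf.split(x, 2, 0)  # two tensors of shape (2, 2), split along axis 0

with tf.Session() as sess:
    print(sess.run(pieces[0]))  # [[ 1.  2.] [ 3.  4.]]
```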
6 changes: 3 additions & 3 deletions examples/4_Utils/save_restore_model.py
@@ -11,7 +11,7 @@

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

@@ -60,11 +60,11 @@ def multilayer_perceptron(x, weights, biases):
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()
init = tf.global_variables_initializer()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()
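Only the data path, the loss call, and the initializer change in this file; tf.train.Saver keeps its TF 0.x interface. A minimal save/restore round trip under the new initializer (the variable and checkpoint path are illustrative):

```python
import tensorflow as tf

w = tf.Variable(tf.zeros([2]), name="w")
init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    save_path = saver.save(sess, "/tmp/model.ckpt")  # illustrative path

with tf.Session() as sess:
    saver.restore(sess, save_path)  # variables come back restored, no init needed
```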
16 changes: 8 additions & 8 deletions examples/4_Utils/tensorboard_advanced.py
@@ -41,12 +41,12 @@ def multilayer_perceptron(x, weights, biases):
layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Create a summary to visualize the first layer ReLU activation
tf.histogram_summary("relu1", layer_1)
tf.summary.histogram("relu1", layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Create another summary to visualize the second layer ReLU activation
tf.histogram_summary("relu2", layer_2)
tf.summary.histogram("relu2", layer_2)
# Output layer
out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
return out_layer
@@ -91,24 +91,24 @@ def multilayer_perceptron(x, weights, biases):
init = tf.initialize_all_variables()

# Create a summary to monitor cost tensor
tf.scalar_summary("loss", loss)
tf.summary.scalar("loss", loss)
# Create a summary to monitor accuracy tensor
tf.scalar_summary("accuracy", acc)
tf.summary.scalar("accuracy", acc)
# Create summaries to visualize weights
for var in tf.trainable_variables():
tf.histogram_summary(var.name, var)
tf.summary.histogram(var.name, var)
# Summarize all gradients
for grad, var in grads:
tf.histogram_summary(var.name + '/gradient', grad)
tf.summary.histogram(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
sess.run(init)

# op to write logs to Tensorboard
summary_writer = tf.train.SummaryWriter(logs_path,
summary_writer = tf.summary.FileWriter(logs_path,
graph=tf.get_default_graph())

# Training cycle
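All summary ops move under the tf.summary namespace in TF 1.0: tf.scalar_summary -> tf.summary.scalar, tf.histogram_summary -> tf.summary.histogram, tf.merge_all_summaries -> tf.summary.merge_all, and tf.train.SummaryWriter -> tf.summary.FileWriter. A condensed sketch of the new logging flow (the log directory and fed value are illustrative):

```python
import tensorflow as tf

loss = tf.placeholder(tf.float32, name="loss")
tf.summary.scalar("loss", loss)             # was tf.scalar_summary
merged_summary_op = tf.summary.merge_all()  # was tf.merge_all_summaries

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/tensorflow_logs",  # was tf.train.SummaryWriter
                                   graph=tf.get_default_graph())
    summary = sess.run(merged_summary_op, feed_dict={loss: 0.5})
    writer.add_summary(summary, global_step=0)
    writer.close()
```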
8 changes: 4 additions & 4 deletions examples/4_Utils/tensorboard_basic.py
@@ -52,18 +52,18 @@
init = tf.initialize_all_variables()

# Create a summary to monitor cost tensor
tf.scalar_summary("loss", cost)
tf.summary.scalar("loss", cost)
# Create a summary to monitor accuracy tensor
tf.scalar_summary("accuracy", acc)
tf.summary.scalar("accuracy", acc)
# Merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
sess.run(init)

# op to write logs to Tensorboard
summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

# Training cycle
for epoch in range(training_epochs):