# tf_test.py (from a fork of kchua/handful-of-trials)
import numpy as np
import tensorflow as tf
from tensorflow_forward_ad import forward_gradients
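# Scratch check (commented out below): read_value() snapshots the variable's
# current state, and the control_dependencies block forces that read to run
# before assign_add, so the fetched old_state should be the pre-update value.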
# ones = np.ones((10, 10))
# v = tf.get_variable('asdf', shape=[10, 10], dtype=tf.float32)
# old_state = v.read_value()  # equivalent to tf.identity
# with tf.control_dependencies([old_state]):
#     reward = tf.assign_add(v, ones)
# with tf.Session() as sess:
#     tf.global_variables_initializer().run()
#     a, b = sess.run([reward, old_state])
#     print(v.eval().sum())
#     print(a.sum(), b.sum())
'''
# Automatic differentiation.
w = tf.Variable(2.0, dtype=tf.float32, name="w")
x = tf.constant(1.0)
eps = tf.Variable(1.0, dtype=tf.float32, name="epsilon")
y = tf.multiply(w, x)
L = tf.square(tf.subtract(y, 1))  # *eps
# dL_dw = forward_gradients(L, w)
opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = opt.minimize(L, var_list=[w])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("=============flg1================")
sess.run(train_op)
print("=============flg2================")
print(sess.run(w))
'''
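# A minimal forward-mode sketch of the autodiff block above, kept commented out
# like the other scratch experiments in this file. It assumes that
# tensorflow_forward_ad's forward_gradients takes (ys, xs) and returns a list
# of tensors, as the commented dL_deps call further down suggests.
# w_f = tf.Variable(2.0, dtype=tf.float32, name="w_fwd")
# x_f = tf.constant(1.0)
# L_f = tf.square(tf.multiply(w_f, x_f) - 1.0)
# dL_dw = forward_gradients(L_f, w_f)  # forward-mode gradient of L_f w.r.t. w_f
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     print(sess.run(dL_dw[0]))  # analytically 2*(w*x - 1)*x = 2.0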
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.1
num_steps = 500
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}
# Create model
def neural_net(x):
    # Hidden fully connected layer with 256 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Hidden fully connected layer with 256 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Construct model
logits = neural_net(X)
## sample weights
eps = tf.Variable(1.3, dtype=tf.float32, name="epsilon")
# Define loss and optimizer
loss_op = tf.multiply(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y)), eps)
loss_valid = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Train only the network parameters; eps is left out of var_list on purpose
train_op = optimizer.minimize(loss_op,
                              var_list=list(weights.values()) + list(biases.values()))
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    for step in range(1, num_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
            # dL_deps = forward_gradients(loss_valid, eps)
            # print("grad_eps: ", dL_deps)
            # print("grad_eps: ", sess.run(dL_deps[0],
            #                              feed_dict={X: batch_x, Y: batch_y}))
            # print(eps.eval())
    print("Optimization Finished!")

    # Calculate accuracy for MNIST test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={X: mnist.test.images,
                                        Y: mnist.test.labels}))
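# Hedged sketch of the epsilon gradient that the commented dL_deps lines above
# point at: since loss_op = mean cross-entropy * eps, its derivative w.r.t. eps
# is the unscaled mean cross-entropy (loss_valid). The op would have to be built
# before the Session block; it is shown commented out here purely as an
# illustration, assuming the same forward_gradients(ys, xs) signature as above.
# dL_deps = forward_gradients(loss_op, eps)
# grad_eps = sess.run(dL_deps[0], feed_dict={X: batch_x, Y: batch_y})
# print("grad_eps (should equal the unscaled loss):", grad_eps)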