04_linreg_eager.py
""" Starter code for a simple regression example using eager execution.
Created by Akshay Agrawal ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 04
"""
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import matplotlib.pyplot as plt
import utils
DATA_FILE = 'data/birth_life_2010.txt'
# In order to use eager execution, `tfe.enable_eager_execution()` must be
# called at the very beginning of a TensorFlow program.
tfe.enable_eager_execution()
# Read the data into a dataset.
data, n_samples = utils.read_birth_life_data(DATA_FILE)
dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))
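# Each element of `dataset` is a single (x, y) pair taken from the two
# columns of `data`, so the training loop below processes one example at a
# time (i.e., batch size 1).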
# Create variables.
w = tfe.Variable(0.0)
b = tfe.Variable(0.0)
# Define the linear predictor.
def prediction(x):
    return x * w + b
# Define loss functions of the form: L(y, y_predicted)
def squared_loss(y, y_predicted):
    return (y - y_predicted) ** 2
def huber_loss(y, y_predicted, m=1.0):
    """Huber loss."""
    t = y - y_predicted
    # Note that enabling eager execution lets you use Python control flow and
    # specify dynamic TensorFlow computations. Contrast this implementation
    # to the graph-construction one found in `utils`, which uses `tf.cond`.
    return t ** 2 if tf.abs(t) <= m else m * (2 * tf.abs(t) - m)
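# For comparison, a graph-style variant of the same loss built with `tf.cond`
# (mirroring the approach used in `utils`). This is only a sketch and is not
# called below; whether `tf.cond` runs under eager execution depends on the
# TensorFlow version.
def huber_loss_cond(y, y_predicted, m=1.0):
    """Huber loss expressed with `tf.cond` instead of Python control flow."""
    t = y - y_predicted
    return tf.cond(tf.abs(t) <= m,
                   lambda: t ** 2,
                   lambda: m * (2 * tf.abs(t) - m))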
def train(loss_fn):
    """Train a regression model evaluated using `loss_fn`."""
    print('Training; loss function: ' + loss_fn.__name__)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    # Define the function through which to differentiate.
    def loss_for_example(x, y):
        return loss_fn(y, prediction(x))

    # `grad_fn(x_i, y_i)` returns (1) the value of `loss_for_example`
    # evaluated at `x_i`, `y_i` and (2) the gradients of any variables used in
    # calculating it.
    grad_fn = tfe.implicit_value_and_gradients(loss_for_example)

    start = time.time()
    for epoch in range(100):
        total_loss = 0.0
        for x_i, y_i in tfe.Iterator(dataset):
            loss, gradients = grad_fn(x_i, y_i)
            # Take an optimization step and update variables.
            optimizer.apply_gradients(gradients)
            total_loss += loss
        if epoch % 10 == 0:
            print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
    print('Took: %f seconds' % (time.time() - start))
    print('Eager execution exhibits significant overhead per operation. '
          'As you increase your batch size, the impact of the overhead will '
          'become less noticeable. Eager execution is under active development: '
          'expect performance to increase substantially in the near future!')
train(huber_loss)
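# To compare losses, you could also call `train(squared_loss)`; note that `w`
# and `b` keep their trained values between calls unless you re-initialize
# them (e.g. by assigning 0.0 to each again).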
plt.plot(data[:,0], data[:,1], 'bo')
# The `.numpy()` method of a tensor retrieves the NumPy array backing it.
# In future versions of eager, you won't need to call `.numpy()` and will
# instead be able to, in most cases, pass Tensors wherever NumPy arrays are
# expected.
plt.plot(data[:,0], data[:,0] * w.numpy() + b.numpy(), 'r',
         label="huber regression")
plt.legend()
plt.show()