
Commit 6727271

upload keras tutorial codes
1 parent 890e292 commit 6727271

8 files changed: +490 -0 lines changed

kerasTUT/2-installation.py

+28
@@ -0,0 +1,28 @@
"""
To learn more or get the code samples, please visit my website:
https://morvanzhou.github.io/tutorials/
Or search: 莫烦Python
Thank you for your support!
"""

# Please note: all tutorial code is written for Python 3.5.
# If you use another version, such as Python 2.7, please modify the code accordingly.

# 2 - Installation

"""
---------------------------
1. Make sure you have installed the following dependencies for Keras:
   - Numpy
   - Scipy

   To install numpy and scipy, please refer to my video tutorial:
   https://www.youtube.com/watch?v=JauGYB-Bzuw&list=PLXO45tsB95cKKyC45gatc8wEc3Ue7BlI4&index=2
---------------------------
2. Run 'pip install keras' on the command line for Python 2,
   or 'pip3 install keras' for Python 3.

   If you encounter a permission-related error, use 'sudo pip install ***'.
---------------------------
"""

kerasTUT/3-backend.py

+50
@@ -0,0 +1,50 @@
"""
To learn more or get the code samples, please visit my website:
https://morvanzhou.github.io/tutorials/
Or search: 莫烦Python
Thank you for your support!
"""

# Please note: all tutorial code is written for Python 3.5.
# If you use another version, such as Python 2.7, please modify the code accordingly.

# 3 - Backend

"""
Details are shown in the video.

----------------------
Method 1:
If you have run Keras at least once, you will find the Keras configuration file at:

~/.keras/keras.json

If it isn't there, you can create it.

The default configuration file looks like this:

{
    "image_dim_ordering": "tf",
    "epsilon": 1e-07,
    "floatx": "float32",
    "backend": "theano"
}

Simply change the "backend" field to either "theano" or "tensorflow",
and Keras will use the new configuration the next time you run any Keras code.
----------------------------
Method 2:

Run this line in a terminal, setting KERAS_BACKEND to tensorflow or theano:

KERAS_BACKEND=tensorflow python -c "from keras import backend"
(for Python 2)

KERAS_BACKEND=tensorflow python3 -c "from keras import backend"
(for Python 3)
"""
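
# Illustrative check (not part of the original tutorial): keras.backend.backend()
# reports which backend is active, so you can verify that the configuration
# change above took effect. Setting the KERAS_BACKEND environment variable
# before the import mirrors Method 2 from within Python.
import os
os.environ.setdefault('KERAS_BACKEND', 'tensorflow')  # assumption: TensorFlow is installed
from keras import backend as K
print('Active backend:', K.backend())  # e.g. 'tensorflow' or 'theano'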

kerasTUT/4-regressor_example.py

+50
@@ -0,0 +1,50 @@
"""
To learn more or get the code samples, please visit my website:
https://morvanzhou.github.io/tutorials/
Or search: 莫烦Python
Thank you for your support!
"""

# Please note: all tutorial code is written for Python 3.5.
# If you use another version, such as Python 2.7, please modify the code accordingly.

# 4 - Regressor example

import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt

# create some data
X = np.linspace(-1, 1, 200)
np.random.shuffle(X)    # randomize the data
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200, ))
# plot the data
plt.scatter(X, Y)
plt.show()

X_train, Y_train = X[:160], Y[:160]     # first 160 data points
X_test, Y_test = X[160:], Y[160:]       # last 40 data points

# build a neural network, from the first layer to the last
model = Sequential()
model.add(Dense(output_dim=1, input_dim=1))

# choose the loss function and the optimizer
model.compile(loss='mse', optimizer='sgd')

# training
print('Training -----------')
for step in range(301):
    cost = model.train_on_batch(X_train, Y_train)
    if step % 100 == 0:
        print('train cost: ', cost)

# test
print('\nTesting ------------')
cost = model.evaluate(X_test, Y_test, batch_size=40)
print('test cost:', cost)
W, b = model.layers[0].get_weights()
print('Weights=', W, '\nbiases=', b)
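
# Optional follow-up (not part of the original commit): visualize the fitted
# line against the held-out test data, reusing the matplotlib import above.
# Since the model is linear, the predictions all lie on the line y = W*x + b.
Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred, 'r-', lw=2)
plt.show()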

kerasTUT/5-classifier_example.py

+58
@@ -0,0 +1,58 @@
"""
To learn more or get the code samples, please visit my website:
https://morvanzhou.github.io/tutorials/
Or search: 莫烦Python
Thank you for your support!
"""

# Please note: all tutorial code is written for Python 3.5.
# If you use another version, such as Python 2.7, please modify the code accordingly.

# 5 - Classifier example

import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop

# download MNIST to '~/.keras/datasets/' the first time this is called
# X_train: (60000, 28, 28), y_train: (60000,); X_test: (10000, 28, 28), y_test: (10000,)
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(X_train.shape[0], -1) / 255.   # flatten to (samples, 784) and normalize to [0, 1]
X_test = X_test.reshape(X_test.shape[0], -1) / 255.      # same for the test set
y_train = np_utils.to_categorical(y_train, nb_classes=10)   # one-hot encode the labels
y_test = np_utils.to_categorical(y_test, nb_classes=10)

# Another way to build your neural net
model = Sequential([
    Dense(32, input_dim=784),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])

# Another way to define your optimizer
rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

# Add metrics to get more of the results you want to see
model.compile(optimizer=rmsprop,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print('Training ------------')
# Another way to train the model
model.fit(X_train, y_train, nb_epoch=2, batch_size=32)

print('\nTesting ------------')
# Evaluate the model with the metrics we defined earlier
loss, accuracy = model.evaluate(X_test, y_test)

print('test loss: ', loss)
print('test accuracy: ', accuracy)
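
# Illustrative extra (not part of the original commit): inspect a few
# predictions. model.predict returns per-class probabilities, so argmax
# along axis 1 recovers the predicted digit for each sample.
probs = model.predict(X_test[:5])
print('predicted digits:', np.argmax(probs, axis=1))
print('true digits:     ', np.argmax(y_test[:5], axis=1))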

kerasTUT/6-CNN_example.py

+87
@@ -0,0 +1,87 @@
"""
To learn more or get the code samples, please visit my website:
https://morvanzhou.github.io/tutorials/
Or search: 莫烦Python
Thank you for your support!
"""

# Please note: all tutorial code is written for Python 3.5.
# If you use another version, such as Python 2.7, please modify the code accordingly.

# 6 - CNN example

import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam

# download MNIST to '~/.keras/datasets/' the first time this is called
# X_train: (60000, 28, 28), y_train: (60000,); X_test: (10000, 28, 28), y_test: (10000,)
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 1, 28, 28)   # (samples, channels, height, width), channels-first layout
X_test = X_test.reshape(-1, 1, 28, 28)
y_train = np_utils.to_categorical(y_train, nb_classes=10)
y_test = np_utils.to_categorical(y_test, nb_classes=10)

# Another way to build your CNN
model = Sequential()

# Conv layer 1, output shape (32, 28, 28)
model.add(Convolution2D(
    nb_filter=32,
    nb_row=5,
    nb_col=5,
    border_mode='same',      # padding method
    input_shape=(1,          # channels
                 28, 28)     # height & width
))
model.add(Activation('relu'))

# Pooling layer 1 (max pooling), output shape (32, 14, 14)
model.add(MaxPooling2D(
    pool_size=(2, 2),
    strides=(2, 2),
    border_mode='same',      # padding method
))

# Conv layer 2, output shape (64, 14, 14)
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))

# Pooling layer 2 (max pooling), output shape (64, 7, 7)
model.add(MaxPooling2D(pool_size=(2, 2), border_mode='same'))

# Fully connected layer 1: input shape (64 * 7 * 7) = (3136), output shape (1024)
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))

# Fully connected layer 2: output shape (10) for the 10 classes
model.add(Dense(10))
model.add(Activation('softmax'))

# Another way to define your optimizer
adam = Adam(lr=1e-4)

# Add metrics to get more of the results you want to see
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print('Training ------------')
# Another way to train the model
model.fit(X_train, y_train, nb_epoch=1, batch_size=32)

print('\nTesting ------------')
# Evaluate the model with the metrics we defined earlier
loss, accuracy = model.evaluate(X_test, y_test)

print('\ntest loss: ', loss)
print('\ntest accuracy: ', accuracy)
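
# Optional check (not part of the original commit): model.summary() prints each
# layer's output shape, which should match the shape comments above, e.g. the
# Flatten layer feeding 64 * 7 * 7 = 3136 values into Dense(1024).
model.summary()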

kerasTUT/7-RNN_Classifier_example.py

+75
@@ -0,0 +1,75 @@
"""
To learn more or get the code samples, please visit my website:
https://morvanzhou.github.io/tutorials/
Or search: 莫烦Python
Thank you for your support!
"""

# Please note: all tutorial code is written for Python 3.5.
# If you use another version, such as Python 2.7, please modify the code accordingly.

# 8 - RNN Classifier example

import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import SimpleRNN, Activation, Dense
from keras.optimizers import Adam

TIME_STEPS = 28     # same as the height of the image
INPUT_SIZE = 28     # same as the width of the image
BATCH_SIZE = 50
BATCH_INDEX = 0
OUTPUT_SIZE = 10
CELL_SIZE = 50
LR = 0.001


# download MNIST to '~/.keras/datasets/' the first time this is called
# X_train: (60000, 28, 28), y_train: (60000,); X_test: (10000, 28, 28), y_test: (10000,)
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255.    # normalize to [0, 1]
X_test = X_test.reshape(-1, 28, 28) / 255.      # normalize to [0, 1]
y_train = np_utils.to_categorical(y_train, nb_classes=10)
y_test = np_utils.to_categorical(y_test, nb_classes=10)

# build the RNN model
model = Sequential()

# RNN cell
model.add(SimpleRNN(
    batch_input_shape=(BATCH_SIZE, TIME_STEPS, INPUT_SIZE),  # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,
    output_dim=CELL_SIZE,
))

# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))

# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# training
for step in range(4001):
    # data shape = (batch_num, steps, inputs/outputs)
    X_batch = X_train[BATCH_INDEX: BATCH_INDEX+BATCH_SIZE, :, :]
    Y_batch = y_train[BATCH_INDEX: BATCH_INDEX+BATCH_SIZE, :]
    cost = model.train_on_batch(X_batch, Y_batch)
    BATCH_INDEX += BATCH_SIZE
    BATCH_INDEX = 0 if BATCH_INDEX >= X_train.shape[0] else BATCH_INDEX  # wrap around at the end of an epoch

    if step % 500 == 0:
        cost, accuracy = model.evaluate(X_test, y_test, batch_size=y_test.shape[0], verbose=False)
        print('test cost: ', cost, 'test accuracy: ', accuracy)
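
# Illustrative arithmetic (not part of the original commit): how many epochs
# the 4001-step loop above covers, given the BATCH_INDEX wrap-around.
steps_per_epoch = X_train.shape[0] // BATCH_SIZE          # 60000 // 50 = 1200
print('approx. epochs trained:', 4001 / steps_per_epoch)  # ~3.3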
