#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import keras
from keras import layers
import numpy as np
import random
import sys
from keras.callbacks import ModelCheckpoint
# Gather data
path = keras.utils.get_file(
'sample.txt',
origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
text = open(path, encoding='utf-8').read().lower()
print('Corpus length (characters):', len(text))
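# The corpus is a plain-text collection of Nietzsche's writings
# (roughly 600k characters of English).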
'''
Data pre-processing
'''
# Length of extracted character sequences
maxlen = 100
# We sample a new sequence every `step` characters
step = 5
sentences = []
next_chars = []
# Extracting sentences and the next characters.
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
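# For example, with maxlen=100 and step=5 the windows overlap heavily:
#   sentences[0] = text[0:100],  next_chars[0] = text[100]
#   sentences[1] = text[5:105],  next_chars[1] = text[105]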
print('Number of sentences:', len(sentences))
# Extracting unique characters from the corpus
chars = sorted(list(set(text)))
print('Number of unique characters:', len(chars))
# Dictionary for mapping unique characters to their index
char_indices = {char: i for i, char in enumerate(chars)}
# Converting characters into one-hot encoding.
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
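# Resulting tensor shapes: x is (len(sentences), maxlen, len(chars)), one
# one-hot vector per character position; y is (len(sentences), len(chars)),
# one-hot encoding the single target character for each sentence.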
'''
Defining Model
'''
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
optimizer = keras.optimizers.RMSprop(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
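# The network: a window of maxlen one-hot character vectors feeds a single
# 128-unit LSTM, and the Dense softmax layer outputs a probability
# distribution over all unique characters for the next character.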
'''
Training Model
'''
def sample(preds, temperature=1.0):
    # Reweight the model's probability distribution by the temperature, then
    # draw the index of a single character from the reweighted distribution
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
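# Illustrative effect of temperature (example values, not computed here):
# for preds = [0.5, 0.3, 0.2], a low temperature such as 0.2 sharpens the
# distribution toward the most likely character, while a high temperature
# such as 1.2 flattens it toward uniform, giving more surprising output.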
# Define the checkpoint: save weights whenever the monitored training loss improves
filepath = "weights-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1,
                             save_best_only=True, mode='min')
callbacks_list = [checkpoint]
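# ModelCheckpoint writes a new weights file each time training loss improves,
# e.g. weights-03-1.5821.hdf5 (illustrative filename); a saved file can be
# restored later with model.load_weights(<path>).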
for epoch in range(1, 60):
print('epoch', epoch)
# Fit the model for 1 epoch
model.fit(x, y,
batch_size=128,
epochs=1,
callbacks=callbacks_list)
    # Select a text seed at random
    start_index = random.randint(0, len(text) - maxlen - 1)
    seed_text = text[start_index: start_index + maxlen]
    print('--- Seeded text: "' + seed_text + '"')
    for temperature in [0.2, 0.5, 1.0, 1.2]:
        print('------ Selected temperature:', temperature)
        # Restart generation from the same seed for each temperature so that
        # the temperatures can be compared fairly
        generated_text = seed_text
        sys.stdout.write(generated_text)
# We generate 400 characters
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
sys.stdout.flush()
print()