
Commit b7ea971

rnn lint

1 parent bf9717b

5 files changed: +26 −23 lines

chapter_recurrent-neural-networks/gru.md

+6 −6
````diff
@@ -85,14 +85,14 @@ ctx = gb.try_gpu()
 def get_params():
     def _one(shape):
         return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)
-
+
     def _three():
         return (_one((num_inputs, num_hiddens)),
                 _one((num_hiddens, num_hiddens)),
-                nd.zeros(num_hiddens, ctx=ctx))
-
-    W_xz, W_hz, b_z = _three()  # Update gate parameters.
-    W_xr, W_hr, b_r = _three()  # Reset gate parameters.
+                nd.zeros(num_hiddens, ctx=ctx))
+
+    W_xz, W_hz, b_z = _three()  # Update gate parameters.
+    W_xr, W_hr, b_r = _three()  # Reset gate parameters.
     W_xh, W_hh, b_h = _three()  # Candidate hidden state parameters.
     # Output layer parameters.
     W_hq = _one((num_hiddens, num_outputs))
@@ -120,7 +120,7 @@ def gru(inputs, state, params):
     W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
     H, = state
     outputs = []
-    for X in inputs:
+    for X in inputs:
         Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
         R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
         H_tilda = nd.tanh(nd.dot(X, W_xh) + R * nd.dot(H, W_hh) + b_h)
````
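
For reference, the gate computations in the `gru` loop above implement the standard GRU updates. The final state update falls outside this hunk; the $H_t$ line below follows the chapter's usual formulation rather than anything visible in the diff:

$$
\begin{aligned}
Z_t &= \sigma(X_t W_{xz} + H_{t-1} W_{hz} + b_z), \\
R_t &= \sigma(X_t W_{xr} + H_{t-1} W_{hr} + b_r), \\
\tilde{H}_t &= \tanh\big(X_t W_{xh} + (R_t \odot H_{t-1}) W_{hh} + b_h\big), \\
H_t &= Z_t \odot H_{t-1} + (1 - Z_t) \odot \tilde{H}_t.
\end{aligned}
$$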

chapter_recurrent-neural-networks/lang-model-dataset.md

+4 −1
````diff
@@ -63,8 +63,11 @@ def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
     epoch_size = num_examples // batch_size
     example_indices = list(range(num_examples))
     random.shuffle(example_indices)
+
     # Return a sequence of length num_steps starting from pos.
-    _data = lambda pos: corpus_indices[pos: pos + num_steps]
+    def _data(pos):
+        return corpus_indices[pos: pos + num_steps]
+
     for i in range(epoch_size):
         # Read batch_size random examples each time.
         i = i * batch_size
````
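
This is the one non-whitespace change in the commit: assigning a lambda to a name is flagged by pycodestyle/flake8 as E731, since a `def` gives the function a real `__name__` and cleaner tracebacks. A self-contained sketch of the rule (the toy values here are illustrative, not from the book):

```python
# Toy stand-ins for the book's corpus_indices / num_steps.
corpus_indices, num_steps = list(range(10)), 3

_data = lambda pos: corpus_indices[pos: pos + num_steps]  # flagged: E731
print(_data.__name__)            # '<lambda>'

def _data(pos):                  # the form this commit switches to
    return corpus_indices[pos: pos + num_steps]

print(_data.__name__, _data(2))  # _data [2, 3, 4]
```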

chapter_recurrent-neural-networks/lstm.md

+7 −7
````diff
@@ -91,15 +91,15 @@ from mxnet.gluon import rnn
 num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
 ctx = gb.try_gpu()
 
-def get_params():
+def get_params():
     def _one(shape):
         return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)
-
+
     def _three():
         return (_one((num_inputs, num_hiddens)),
                 _one((num_hiddens, num_hiddens)),
-                nd.zeros(num_hiddens, ctx=ctx))
-
+                nd.zeros(num_hiddens, ctx=ctx))
+
     W_xi, W_hi, b_i = _three()  # Input gate parameters.
     W_xf, W_hf, b_f = _three()  # Forget gate parameters.
     W_xo, W_ho, b_o = _three()  # Output gate parameters.
@@ -121,7 +121,7 @@ def get_params():
 
 ```{.python .input n=3}
 def init_lstm_state(batch_size, num_hiddens, ctx):
-    return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),
+    return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),
             nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx))
 ```
 
@@ -133,7 +133,7 @@ def lstm(inputs, state, params):
             W_hq, b_q] = params
     (H, C) = state
     outputs = []
-    for X in inputs:
+    for X in inputs:
         I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
         F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
         O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
@@ -172,7 +172,7 @@ gb.train_and_predict_rnn(lstm, get_params, init_lstm_state, num_hiddens,
 lstm_layer = rnn.LSTM(num_hiddens)
 model = gb.RNNModel(lstm_layer, vocab_size)
 gb.train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
-                               corpus_indices, idx_to_char, char_to_idx,
+                               corpus_indices, idx_to_char, char_to_idx,
                                num_epochs, num_steps, lr, clipping_theta,
                                batch_size, pred_period, pred_len, prefixes)
 ```
````
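
For reference, the gates computed in `lstm` above are the standard LSTM updates. The candidate cell, cell state, and hidden state lines fall outside the hunk; written out in the chapter's usual notation, the full step is:

$$
\begin{aligned}
I_t &= \sigma(X_t W_{xi} + H_{t-1} W_{hi} + b_i), \\
F_t &= \sigma(X_t W_{xf} + H_{t-1} W_{hf} + b_f), \\
O_t &= \sigma(X_t W_{xo} + H_{t-1} W_{ho} + b_o), \\
\tilde{C}_t &= \tanh(X_t W_{xc} + H_{t-1} W_{hc} + b_c), \\
C_t &= F_t \odot C_{t-1} + I_t \odot \tilde{C}_t, \\
H_t &= O_t \odot \tanh(C_t).
\end{aligned}
$$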

chapter_recurrent-neural-networks/rnn-gluon.md

+7 −7
````diff
@@ -101,9 +101,9 @@ predict_rnn_gluon('分开', 10, model, vocab_size, ctx, idx_to_char, char_to_idx
 
 ```{.python .input n=18}
 # This function is saved in the gluonbook package for later use.
-def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
-                                corpus_indices, idx_to_char, char_to_idx,
-                                num_epochs, num_steps, lr, clipping_theta,
+def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
+                                corpus_indices, idx_to_char, char_to_idx,
+                                num_epochs, num_steps, lr, clipping_theta,
                                 batch_size, pred_period, pred_len, prefixes):
     loss = gloss.SoftmaxCrossEntropyLoss()
     model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
@@ -134,7 +134,7 @@ def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
                 epoch + 1, math.exp(loss_sum / (t + 1)), time.time() - start))
             for prefix in prefixes:
                 print(' -', predict_rnn_gluon(
-                    prefix, pred_len, model, vocab_size,
+                    prefix, pred_len, model, vocab_size,
                     ctx, idx_to_char, char_to_idx))
 ```
 
@@ -143,9 +143,9 @@ def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
 ```{.python .input n=19}
 num_epochs, batch_size, lr, clipping_theta = 200, 32, 1e2, 1e-2
 pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
-train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
-                            corpus_indices, idx_to_char, char_to_idx,
-                            num_epochs, num_steps, lr, clipping_theta,
+train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
+                            corpus_indices, idx_to_char, char_to_idx,
+                            num_epochs, num_steps, lr, clipping_theta,
                             batch_size, pred_period, pred_len, prefixes)
 ```
 
````
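The quantity printed during training above, `math.exp(loss_sum / (t + 1))`, is the model's perplexity: the exponential of the average cross-entropy loss over the epoch,

$$\mathrm{perplexity} = \exp\!\Big(-\frac{1}{n}\sum_{t=1}^{n}\log p(x_t \mid x_1, \ldots, x_{t-1})\Big),$$

so a model that predicts uniformly over `vocab_size` characters scores `vocab_size`, and lower is better.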
chapter_recurrent-neural-networks/rnn-scratch.md

+2 −2
````diff
@@ -102,7 +102,7 @@ len(outputs), outputs[0].shape, state_new[0].shape
 def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
                 num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
     state = init_rnn_state(1, num_hiddens, ctx)
-    output = [char_to_idx[prefix[0]]]
+    output = [char_to_idx[prefix[0]]]
     for t in range(num_chars + len(prefix) - 1):
         # Use the previous time step's output as the current time step's input.
         X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)
@@ -173,7 +173,7 @@ def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
     if is_random_iter:
         data_iter_fn = gb.data_iter_random
     else:
-        data_iter_fn = gb.data_iter_consecutive
+        data_iter_fn = gb.data_iter_consecutive
     params = get_params()
     loss = gloss.SoftmaxCrossEntropyLoss()
 
````
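The loop bound `num_chars + len(prefix) - 1` in `predict_rnn` above covers the warm-up steps, where the known prefix characters are fed in rather than the model's own predictions, plus `num_chars` generation steps. A minimal sketch of that control flow, with a hypothetical `next_char` standing in for the RNN (the branching shown here mirrors the book's `predict_rnn`, which is only partially visible in this hunk):

```python
# Sketch of predict_rnn's prefix warm-up + greedy generation control flow.
# next_char is a hypothetical stand-in for "run the RNN, take the argmax".
def greedy_generate(prefix, num_chars, next_char):
    output = [prefix[0]]
    for t in range(num_chars + len(prefix) - 1):
        if t < len(prefix) - 1:
            output.append(prefix[t + 1])      # warm-up: feed the known prefix
        else:
            output.append(next_char(output))  # generate from the last output
    return ''.join(output)

# Toy "model" that just advances the alphabet: 'ab' -> 'abcdef'.
print(greedy_generate('ab', 4, lambda out: chr(ord(out[-1]) + 1)))
```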