
Fix regressions caused by a previous change
nealwu committed Jan 20, 2017
1 parent e9e470d commit f63a80a
Showing 2 changed files with 15 additions and 10 deletions.
4 changes: 2 additions & 2 deletions tutorials/rnn/translate/data_utils.py
@@ -239,8 +239,8 @@ def data_to_token_ids(data_path, target_path, vocabulary_path,
           counter += 1
           if counter % 100000 == 0:
             print("  tokenizing line %d" % counter)
-          token_ids = sentence_to_token_ids(line, vocab, tokenizer,
-                                            normalize_digits)
+          token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab,
+                                            tokenizer, normalize_digits)
           tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")


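Why the data_utils.py change matters: tf.compat.as_bytes converts a Python 3 str to UTF-8 bytes, presumably so the tokens produced from each line match the byte-string keys of the loaded vocabulary. A minimal standalone sketch of that conversion (the vocabulary contents and UNK id here are hypothetical, not from this repo):

    import tensorflow as tf

    vocab = {b"hello": 3, b"world": 4}   # hypothetical vocabulary: bytes -> id
    UNK_ID = 0                           # hypothetical id for unknown tokens

    line = "hello there"                               # a str under Python 3
    words = tf.compat.as_bytes(line).split()           # [b"hello", b"there"]
    token_ids = [vocab.get(w, UNK_ID) for w in words]  # [3, 0]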
21 changes: 13 additions & 8 deletions tutorials/rnn/translate/seq2seq_model.py
@@ -108,24 +108,29 @@ def sampled_loss(labels, inputs):
         local_b = tf.cast(b, tf.float32)
         local_inputs = tf.cast(inputs, tf.float32)
         return tf.cast(
-            tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,
-                                       num_samples, self.target_vocab_size),
+            tf.nn.sampled_softmax_loss(
+                weights=local_w_t,
+                biases=local_b,
+                labels=labels,
+                inputs=local_inputs,
+                num_sampled=num_samples,
+                num_classes=self.target_vocab_size),
             dtype)
       softmax_loss_function = sampled_loss
 
     # Create the internal multi-layer cell for our RNN.
     def single_cell():
-      return tf.nn.rnn_cell.GRUCell(size)
+      return tf.contrib.rnn.GRUCell(size)
     if use_lstm:
       def single_cell():
-        return tf.nn.rnn_cell.BasicLSTMCell(size)
+        return tf.contrib.rnn.BasicLSTMCell(size)
     cell = single_cell()
     if num_layers > 1:
-      cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(num_layers)])
+      cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
 
     # The seq2seq function: we use embedding for the input and attention.
     def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
-      return tf.nn.seq2seq.embedding_attention_seq2seq(
+      return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
           encoder_inputs,
           decoder_inputs,
           cell,
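The sampled_softmax_loss rewrite above is more than cosmetic: TensorFlow 1.0 changed the positional argument order of tf.nn.sampled_softmax_loss (labels now precedes inputs, as the diff itself shows), so the old positional call would bind arguments to the wrong parameters. Spelling out the keywords sidesteps the reordering. A minimal sketch against the TF 1.0 signature (all names, shapes, and sizes here are hypothetical):

    import tensorflow as tf

    num_classes, dim, num_sampled = 10000, 128, 512        # hypothetical sizes
    w_t = tf.get_variable("proj_w_t", [num_classes, dim])  # transposed projection
    b = tf.get_variable("proj_b", [num_classes])
    inputs = tf.placeholder(tf.float32, [None, dim])       # decoder outputs
    labels = tf.placeholder(tf.int64, [None, 1])           # target ids

    loss = tf.nn.sampled_softmax_loss(
        weights=w_t, biases=b, labels=labels, inputs=inputs,
        num_sampled=num_sampled, num_classes=num_classes)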
@@ -155,7 +160,7 @@ def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
 
     # Training outputs and losses.
     if forward_only:
-      self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
+      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
           self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
           softmax_loss_function=softmax_loss_function)
@@ -167,7 +172,7 @@ def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
               for output in self.outputs[b]
           ]
     else:
-      self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
+      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
           self.target_weights, buckets,
           lambda x, y: seq2seq_f(x, y, False),
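The remaining edits track module moves in TensorFlow 1.0: the RNN cell classes migrated from tf.nn.rnn_cell to tf.contrib.rnn, and the bucketed seq2seq helpers from tf.nn.seq2seq to tf.contrib.legacy_seq2seq. A minimal standalone sketch of building the relocated cell stack, following the same fresh-cell-per-layer pattern as the diff (the sizes are hypothetical):

    import tensorflow as tf

    size, num_layers = 256, 2  # hypothetical hyperparameters

    def single_cell():
        # Construct a fresh GRUCell per layer, matching the diff's pattern;
        # reusing a single cell object across layers is unsafe in TF 1.x.
        return tf.contrib.rnn.GRUCell(size)

    cell = single_cell()
    if num_layers > 1:
        cell = tf.contrib.rnn.MultiRNNCell(
            [single_cell() for _ in range(num_layers)])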
