@@ -65,7 +65,10 @@ def RNN(X, weights, biases):
    ##########################################

    # basic LSTM Cell.
-    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
+    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
+        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
+    else:
+        lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units)
    # lstm cell is divided into two parts (c_state, h_state)
    init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)

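The new if/else above gates on tf.__version__ because BasicLSTMCell moved from tf.nn.rnn_cell to tf.contrib.rnn around TensorFlow 0.12/1.0. A minimal sketch of the same compatibility idiom, factored into a reusable helper (the name make_lstm_cell is illustrative, not part of this commit):

import tensorflow as tf

def make_lstm_cell(n_hidden_units):
    # Parse major/minor from a version string such as '0.11.0' or '1.1.0'.
    major, minor = (int(v) for v in tf.__version__.split('.')[:2])
    if major < 1 and minor < 12:
        # TF < 0.12: the cell lives under tf.nn.rnn_cell.
        return tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
    # TF >= 0.12: the cell was moved to tf.contrib.rnn.
    return tf.contrib.rnn.BasicLSTMCell(n_hidden_units)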
@@ -85,14 +88,17 @@ def RNN(X, weights, biases):

    # # or
    # unpack to list [(batch, outputs)..] * steps
-    outputs = tf.unpack(tf.transpose(outputs, [1, 0, 2]))    # states is the last outputs
+    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
+        outputs = tf.unpack(tf.transpose(outputs, [1, 0, 2]))    # states is the last outputs
+    else:
+        outputs = tf.unstack(tf.transpose(outputs, [1, 0, 2]))
    results = tf.matmul(outputs[-1], weights['out']) + biases['out']

    return results


pred = RNN(x, weights, biases)
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
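The last hunk switches softmax_cross_entropy_with_logits to keyword arguments: TensorFlow 1.0 made logits and labels keyword-only for this op, so the old positional call raises an error on newer versions. The same hunk also guards the tf.unpack call, which was renamed to tf.unstack in TF 1.0, with the version check shown earlier. A minimal sketch of the migrated loss call, using hypothetical stand-in tensors:

import tensorflow as tf

# Hypothetical stand-ins for the network output and one-hot targets.
pred = tf.constant([[2.0, 0.5, 0.1]])
y = tf.constant([[1.0, 0.0, 0.0]])

# TF < 1.0 accepted softmax_cross_entropy_with_logits(pred, y) positionally;
# TF >= 1.0 requires the keywords, which also guards against swapping the two tensors.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))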