Skip to content

Commit

Permalink
`h` (the hidden state) rather than `c` (the cell state) should be fed into the next layer of LSTM (pytorch#222)
Browse files Browse the repository at this point in the history
  • Loading branch information
dtolpin authored and soumith committed Sep 25, 2017
1 parent 3648cbc commit ddf9e30
Showing 1 changed file with 5 additions and 5 deletions.
10 changes: 5 additions & 5 deletions time_sequence_prediction/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,12 +23,12 @@ def forward(self, input, future = 0):

for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
h_t2, c_t2 = self.lstm2(c_t, (h_t2, c_t2))
outputs += [c_t2]
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
outputs += [h_t2]
for i in range(future):# if we should predict the future
h_t, c_t = self.lstm1(c_t2, (h_t, c_t))
h_t2, c_t2 = self.lstm2(c_t, (h_t2, c_t2))
outputs += [c_t2]
h_t, c_t = self.lstm1(h_t2, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
outputs += [h_t2]
outputs = torch.stack(outputs, 1).squeeze(2)
return outputs

Expand Down

0 comments on commit ddf9e30

Please sign in to comment.