#================================================================
#
# File name : RL-Bitcoin-trading-bot_3.py
# Author : PyLessons
# Created date: 2020-12-20
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/RL-Bitcoin-trading-bot
# Description : Trading Crypto with Reinforcement Learning #3
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import copy
import pandas as pd
import numpy as np
import random
from collections import deque
from tensorboardX import SummaryWriter
from tensorflow.keras.optimizers import Adam, RMSprop
from model import Actor_Model, Critic_Model
from utils import TradingGraph, Write_to_file

class CustomEnv:
    # A custom Bitcoin trading environment
    def __init__(self, df, initial_balance=1000, lookback_window_size=50, Render_range=100):
        # Define action space, state size and other custom parameters
        self.df = df.dropna().reset_index()
        self.df_total_steps = len(self.df) - 1
        self.initial_balance = initial_balance
        self.lookback_window_size = lookback_window_size
        self.Render_range = Render_range # render range in visualization

        # Action space of 3 discrete actions: 0 is hold, 1 is buy, 2 is sell
        self.action_space = np.array([0, 1, 2])

        # Orders history contains the balance, net_worth, crypto_bought, crypto_sold, crypto_held values for the last lookback_window_size steps
        self.orders_history = deque(maxlen=self.lookback_window_size)

        # Market history contains the OHLCV values for the last lookback_window_size prices
        self.market_history = deque(maxlen=self.lookback_window_size)

        # State size contains Market+Orders history for the last lookback_window_size steps
        self.state_size = (self.lookback_window_size, 10)

        # Neural Networks part below
        self.lr = 0.00001
        self.epochs = 1
        self.normalize_value = 100000
        self.optimizer = Adam

        # Create Actor-Critic network models
        self.Actor = Actor_Model(input_shape=self.state_size, action_space=self.action_space.shape[0], lr=self.lr, optimizer=self.optimizer)
        self.Critic = Critic_Model(input_shape=self.state_size, action_space=self.action_space.shape[0], lr=self.lr, optimizer=self.optimizer)

    # create tensorboard writer
    def create_writer(self):
        self.replay_count = 0
        self.writer = SummaryWriter(comment="Crypto_trader")

    # Reset the state of the environment to an initial state
    def reset(self, env_steps_size=0):
        self.visualization = TradingGraph(Render_range=self.Render_range) # init visualization
        self.trades = deque(maxlen=self.Render_range) # limited orders memory for visualization
        self.balance = self.initial_balance
        self.net_worth = self.initial_balance
        self.prev_net_worth = self.initial_balance
        self.crypto_held = 0
        self.crypto_sold = 0
        self.crypto_bought = 0
        self.episode_orders = 0 # number of orders placed during the episode
        self.env_steps_size = env_steps_size
        if env_steps_size > 0: # used for training dataset
            self.start_step = random.randint(self.lookback_window_size, self.df_total_steps - env_steps_size)
            self.end_step = self.start_step + env_steps_size
        else: # used for testing dataset
            self.start_step = self.lookback_window_size
            self.end_step = self.df_total_steps

        self.current_step = self.start_step

        for i in reversed(range(self.lookback_window_size)):
            current_step = self.current_step - i
            self.orders_history.append([self.balance, self.net_worth, self.crypto_bought, self.crypto_sold, self.crypto_held])
            self.market_history.append([self.df.loc[current_step, 'Open'],
                                        self.df.loc[current_step, 'High'],
                                        self.df.loc[current_step, 'Low'],
                                        self.df.loc[current_step, 'Close'],
                                        self.df.loc[current_step, 'Volume']
                                        ])

        state = np.concatenate((self.market_history, self.orders_history), axis=1)
        return state

    # Get the data points for the given current_step
    def _next_observation(self):
        self.market_history.append([self.df.loc[self.current_step, 'Open'],
                                    self.df.loc[self.current_step, 'High'],
                                    self.df.loc[self.current_step, 'Low'],
                                    self.df.loc[self.current_step, 'Close'],
                                    self.df.loc[self.current_step, 'Volume']
                                    ])
        obs = np.concatenate((self.market_history, self.orders_history), axis=1)
        return obs

    # Execute one time step within the environment
    def step(self, action):
        self.crypto_bought = 0
        self.crypto_sold = 0
        self.current_step += 1

        # Set the current price to a random price between open and close
        current_price = random.uniform(
            self.df.loc[self.current_step, 'Open'],
            self.df.loc[self.current_step, 'Close'])
        Date = self.df.loc[self.current_step, 'Date'] # for visualization
        High = self.df.loc[self.current_step, 'High'] # for visualization
        Low = self.df.loc[self.current_step, 'Low'] # for visualization

        if action == 0: # Hold
            pass

        elif action == 1 and self.balance > self.initial_balance / 100:
            # Buy with 100% of current balance
            self.crypto_bought = self.balance / current_price
            self.balance -= self.crypto_bought * current_price
            self.crypto_held += self.crypto_bought
            self.trades.append({'Date': Date, 'High': High, 'Low': Low, 'total': self.crypto_bought, 'type': "buy"})
            self.episode_orders += 1

        elif action == 2 and self.crypto_held > 0:
            # Sell 100% of current crypto held
            self.crypto_sold = self.crypto_held
            self.balance += self.crypto_sold * current_price
            self.crypto_held -= self.crypto_sold
            self.trades.append({'Date': Date, 'High': High, 'Low': Low, 'total': self.crypto_sold, 'type': "sell"})
            self.episode_orders += 1

        self.prev_net_worth = self.net_worth
        self.net_worth = self.balance + self.crypto_held * current_price

        self.orders_history.append([self.balance, self.net_worth, self.crypto_bought, self.crypto_sold, self.crypto_held])
        #Write_to_file(Date, self.orders_history[-1])

        # Calculate reward
        reward = self.net_worth - self.prev_net_worth

        if self.net_worth <= self.initial_balance / 2:
            done = True
        else:
            done = False

        obs = self._next_observation() / self.normalize_value

        return obs, reward, done

    # render environment
    def render(self, visualize=False):
        #print(f'Step: {self.current_step}, Net Worth: {self.net_worth}')
        if visualize:
            Date = self.df.loc[self.current_step, 'Date']
            Open = self.df.loc[self.current_step, 'Open']
            Close = self.df.loc[self.current_step, 'Close']
            High = self.df.loc[self.current_step, 'High']
            Low = self.df.loc[self.current_step, 'Low']
            Volume = self.df.loc[self.current_step, 'Volume']

            # Render the environment to the screen
            self.visualization.render(Date, Open, High, Low, Close, Volume, self.net_worth, self.trades)
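
    # The helper below implements Generalized Advantage Estimation (GAE,
    # Schulman et al.). With V given by the Critic, it computes
    #   delta_t = r_t + gamma * (1 - done_t) * V(s_{t+1}) - V(s_t)
    #   A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}
    # and returns the (optionally normalized) advantages A_t together with the
    # Critic regression targets target_t = A_t + V(s_t).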
    def get_gaes(self, rewards, dones, values, next_values, gamma=0.99, lamda=0.95, normalize=True):
        deltas = [r + gamma * (1 - d) * nv - v for r, d, nv, v in zip(rewards, dones, next_values, values)]
        deltas = np.stack(deltas)
        gaes = copy.deepcopy(deltas)
        for t in reversed(range(len(deltas) - 1)):
            gaes[t] = gaes[t] + (1 - dones[t]) * gamma * lamda * gaes[t + 1]

        target = gaes + values
        if normalize:
            gaes = (gaes - gaes.mean()) / (gaes.std() + 1e-8)
        return np.vstack(gaes), np.vstack(target)

    def replay(self, states, actions, rewards, predictions, dones, next_states):
        # reshape memory to appropriate shape for training
        states = np.vstack(states)
        next_states = np.vstack(next_states)
        actions = np.vstack(actions)
        predictions = np.vstack(predictions)

        # Compute discounted rewards
        #discounted_r = np.vstack(self.discount_rewards(rewards))

        # Get Critic network predictions
        values = self.Critic.predict(states)
        next_values = self.Critic.predict(next_states)

        # Compute advantages
        #advantages = discounted_r - values
        advantages, target = self.get_gaes(rewards, dones, np.squeeze(values), np.squeeze(next_values))
        '''
        pylab.plot(target,'-')
        pylab.plot(advantages,'.')
        ax=pylab.gca()
        ax.grid(True)
        pylab.show()
        '''
        # stack advantages, old action probabilities and one-hot actions side by side
        # into one numpy array (consumed together as y_true by the Actor's custom loss in model.py)
        y_true = np.hstack([advantages, predictions, actions])

        # training Actor and Critic networks
        a_loss = self.Actor.Actor.fit(states, y_true, epochs=self.epochs, verbose=0, shuffle=True)
        c_loss = self.Critic.Critic.fit(states, target, epochs=self.epochs, verbose=0, shuffle=True)

        self.writer.add_scalar('Data/actor_loss_per_replay', np.sum(a_loss.history['loss']), self.replay_count)
        self.writer.add_scalar('Data/critic_loss_per_replay', np.sum(c_loss.history['loss']), self.replay_count)
        self.replay_count += 1

    def act(self, state):
        # Use the Actor network to predict action probabilities, then sample an action from them
        prediction = self.Actor.predict(np.expand_dims(state, axis=0))[0]
        action = np.random.choice(self.action_space, p=prediction)
        return action, prediction

    def save(self, name="Crypto_trader"):
        # save keras model weights
        self.Actor.Actor.save_weights(f"{name}_Actor.h5")
        self.Critic.Critic.save_weights(f"{name}_Critic.h5")

    def load(self, name="Crypto_trader"):
        # load keras model weights
        self.Actor.Actor.load_weights(f"{name}_Actor.h5")
        self.Critic.Critic.load_weights(f"{name}_Critic.h5")

def Random_games(env, visualize, train_episodes=50):
    average_net_worth = 0
    for episode in range(train_episodes):
        state = env.reset()
        while True:
            env.render(visualize)
            action = np.random.randint(3, size=1)[0]
            state, reward, done = env.step(action)
            if env.current_step == env.end_step:
                average_net_worth += env.net_worth
                print("net_worth:", episode, env.net_worth)
                break

    print("average {} episodes random net_worth: {}".format(train_episodes, average_net_worth/train_episodes))

def train_agent(env, visualize=False, train_episodes=50, training_batch_size=500):
    env.create_writer() # create TensorBoard writer
    total_average = deque(maxlen=100) # save recent 100 episodes net worth
    best_average = 0 # used to track best average net worth
    for episode in range(train_episodes):
        state = env.reset(env_steps_size=training_batch_size)

        states, actions, rewards, predictions, dones, next_states = [], [], [], [], [], []
        for t in range(training_batch_size):
            env.render(visualize)
            action, prediction = env.act(state)
            next_state, reward, done = env.step(action)
            states.append(np.expand_dims(state, axis=0))
            next_states.append(np.expand_dims(next_state, axis=0))
            action_onehot = np.zeros(3)
            action_onehot[action] = 1
            actions.append(action_onehot)
            rewards.append(reward)
            dones.append(done)
            predictions.append(prediction)
            state = next_state

        env.replay(states, actions, rewards, predictions, dones, next_states)
        total_average.append(env.net_worth)
        average = np.average(total_average)

        env.writer.add_scalar('Data/average net_worth', average, episode)
        env.writer.add_scalar('Data/episode_orders', env.episode_orders, episode)

        print("net worth {} {:.2f} {:.2f} {}".format(episode, env.net_worth, average, env.episode_orders))
        # only start saving once the 100-episode rolling window is full
        if episode > len(total_average):
            if best_average < average:
                best_average = average
                print("Saving model")
                env.save()

def test_agent(env, visualize=True, test_episodes=10):
    env.load() # load the model
    average_net_worth = 0
    for episode in range(test_episodes):
        state = env.reset()
        while True:
            env.render(visualize)
            action, prediction = env.act(state)
            state, reward, done = env.step(action)
            if env.current_step == env.end_step:
                average_net_worth += env.net_worth
                print("net_worth:", episode, env.net_worth, env.episode_orders)
                break

    print("average {} episodes agent net_worth: {}".format(test_episodes, average_net_worth/test_episodes))

df = pd.read_csv('./pricedata.csv')
df = df.sort_values('Date')

lookback_window_size = 50
train_df = df[:-720-lookback_window_size]
test_df = df[-720-lookback_window_size:] # 30 days

train_env = CustomEnv(train_df, lookback_window_size=lookback_window_size)
test_env = CustomEnv(test_df, lookback_window_size=lookback_window_size)

#train_agent(train_env, visualize=False, train_episodes=20000, training_batch_size=500)
test_agent(test_env, visualize=True, test_episodes=1000)
Random_games(test_env, visualize=False, train_episodes=1000)
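
# Usage notes: pricedata.csv is expected to contain at least the columns read above
# ('Date', 'Open', 'High', 'Low', 'Close', 'Volume'). To train from scratch,
# uncomment the train_agent(...) call above; test_agent(...) assumes weights
# previously saved by env.save() (Crypto_trader_Actor.h5 / Crypto_trader_Critic.h5).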