"""
Asynchronous Advantage Actor-Critic (A3C) with a continuous action space, Reinforcement Learning.

The Pendulum example. Convergence is not guaranteed: Pendulum is a difficult
environment, and this code can struggle to converge.

View more on MorvanZhou's tutorials (莫烦Python): https://morvanzhou.github.io/tutorials/

Using:
tensorflow 1.0
gym 0.8.0
"""
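
# A3C in brief, as implemented below: each worker owns a private environment
# and a local ACNet; it collects up to UPDATE_GLOBAL_ITER transitions, turns
# them into n-step value targets, pushes the resulting gradients into the
# shared global net ("push"), then copies the fresh global parameters back
# into its local net ("pull").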

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 400           # maximum steps per episode
MAX_GLOBAL_EP = 800         # total episodes across all workers
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5      # push/pull with the global net every 5 steps
GAMMA = 0.9                 # reward discount
ENTROPY_BETA = 0.01         # weight of the entropy exploration bonus
LR_A = 0.0001               # learning rate for actor
LR_C = 0.001                # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0

env = gym.make(GAME)

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
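
# For Pendulum-v0: N_S = 3 (cos(theta), sin(theta), theta_dot), N_A = 1, and
# A_BOUND is roughly [array([-2.], dtype=float32), array([2.], dtype=float32)].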


class ACNet(object):
    def __init__(self, scope, globalAC=None):

        if scope == GLOBAL_NET_SCOPE:  # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self._build_net(N_A)
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:  # local net, calculates losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                mu, sigma, self.v = self._build_net(N_A)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_losses = tf.square(td)  # shape (None, 1); the unreduced losses give summed gradients over the batch
                    self.c_loss = tf.reduce_mean(self.c_losses)

                with tf.name_scope('wrap_a_out'):
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-4  # scale mu to the action bound; keep sigma strictly positive

                normal_dist = tf.contrib.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_losses = -self.exp_v  # shape (None, 1); the unreduced losses give summed gradients over the batch
                    self.a_loss = tf.reduce_mean(self.a_losses)
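
                # The actor loss maximizes E[log pi(a|s) * TD_error + ENTROPY_BETA * H(pi)]:
                # the TD error acts as the advantage estimate, and the entropy
                # bonus keeps sigma from collapsing too early.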

                with tf.name_scope('choose_a'):  # use local params to choose action
                    # sample(1) has shape (1, batch, N_A); drop the sample axis, then clip to the action bounds
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_losses, self.a_params)  # gradients of the unreduced losses sum over the batch
                    self.c_grads = tf.gradients(self.c_losses, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
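
            # "push" applies this worker's gradients to the GLOBAL parameters via
            # the shared optimizers; "pull" overwrites the local parameters with
            # the current global ones, so the next rollout uses the latest policy.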

    def _build_net(self, n_a):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('critic'):  # only the critic drives the rnn update
            cell_size = 32
            s = tf.expand_dims(self.s, axis=1,
                               name='timely_input')  # [time_step, feature] => [time_step, batch, feature]
            rnn_cell = tf.contrib.rnn.BasicRNNCell(cell_size)
            self.init_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
            outputs, self.final_state = tf.nn.dynamic_rnn(
                cell=rnn_cell, inputs=s, initial_state=self.init_state, time_major=True)
            cell_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs')  # joined state representation
            l_c = tf.layers.dense(cell_out, 50, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value

        with tf.variable_scope('actor'):  # state representation is based on the critic
            cell_out = tf.stop_gradient(cell_out, name='c_cell_out')  # actor gradients must not flow into the critic's rnn
            l_a = tf.layers.dense(cell_out, 80, tf.nn.relu6, kernel_initializer=w_init, name='la')
            mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local
        state, _, _ = SESS.run(
            [self.final_state, self.update_a_op, self.update_c_op],
            feed_dict)  # local grads are applied to the global net
        return state  # final rnn state, fed as the next update's initial state

    def pull_global(self):  # run by a local
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):  # run by a local
        s = s[np.newaxis, :]
        return SESS.run(self.A, {self.s: s})[0]
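
    # Note: choose_action feeds a single state and leaves the rnn at its zero
    # initial state, so the per-step policy carries no recurrent memory between
    # steps; only update_global replays a whole buffer through the rnn and
    # threads its final state into the next update.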


class Worker(object):
    def __init__(self, name, globalAC):
        self.env = gym.make(GAME).unwrapped
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                done = ep_t == MAX_EP_STEP - 1  # Pendulum never signals done; end the episode manually
                r /= 10  # scale the reward down (roughly into [-1.7, 0]) to stabilize training

                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)

                if total_step % UPDATE_GLOBAL_ITER == 0 or done:  # update global and assign to local net
                    if done:
                        v_s_ = 0  # terminal
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    buffer_v_target = []
                    for r in buffer_r[::-1]:  # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()
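
                    # The backward sweep above builds the n-step bootstrapped targets:
                    #   v_target(t) = r_t + GAMMA*r_{t+1} + ... + GAMMA^(k-1)*r_{t+k-1} + GAMMA^k * v_s_
                    # where v_s_ is the critic's estimate for the state after the
                    # buffer (or 0 at a terminal state).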

                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)

                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                        # the rnn's zero initial state is used at the beginning of an episode
                    }

                    if ep_t > UPDATE_GLOBAL_ITER - 1:  # not the first update of this episode
                        feed_dict[self.AC.init_state] = state  # carry over the rnn state from the previous update
                    state = self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                    )
                    GLOBAL_EP += 1
                    break

if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i  # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, SESS.graph)

    worker_threads = []
    for worker in workers:
        # bind the method directly; a `lambda: worker.work()` would look up the
        # loop variable at call time and could start the wrong worker
        t = threading.Thread(target=worker.work)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)

    plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('episode')
    plt.ylabel('Total moving reward')
    plt.show()