"""
Asynchronous Advantage Actor-Critic (A3C), Reinforcement Learning.

The Pendulum example. Version 1: convergence expected.

View more on [莫烦Python]: https://morvanzhou.github.io/tutorials/

Using:
tensorflow 1.0
gym 0.8.0
"""

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil

np.random.seed(2)
tf.set_random_seed(2)  # reproducible

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = False
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 300               # maximum steps per episode
MAX_GLOBAL_EP = 800             # stop after this many episodes in total
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5          # push/pull with the global net every 5 steps
GAMMA = 0.9                     # reward discount
LR_A = 0.001                    # learning rate for actor
LR_C = 0.002                    # learning rate for critic

env = gym.make(GAME)
env.seed(1)  # reproducible

N_S = env.observation_space.shape[0]   # state dimensionality (3 for Pendulum)
N_A = env.action_space.shape[0]        # action dimensionality (1 for Pendulum)
A_BOUND = [env.action_space.low, env.action_space.high]


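# A3C in brief: one global net only stores the shared parameters; every worker
# owns a local copy that collects experience, computes gradients locally,
# pushes them to the global net through a shared optimizer, and then pulls the
# fresh global parameters back.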
class ACNet(object):
    def __init__(self, scope, n_s, n_a,
                 a_bound=None, sess=None,
                 opt_a=None, opt_c=None, global_a_params=None, global_c_params=None):

        if scope == GLOBAL_NET_SCOPE:  # global net: only holds the shared parameters
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                self._build_net(n_a)
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:  # local net: build losses and gradient ops
            self.sess = sess
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, n_a], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'R')

                mu, sigma, self.v = self._build_net(n_a)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))
                # scale the mean to the action range and keep sigma strictly
                # positive; mu/sigma keep shape [None, n_a] so that
                # log_prob(a_his) below is element-wise (squeezing them would
                # broadcast it to [None, None])
                self.mu, self.sigma = mu * a_bound[1], sigma + 1e-2
                self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
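                # actor objective: maximize E[log pi(a|s) * td] + beta * H(pi);
                # the TD error serves as the advantage estimate, and the
                # entropy term (beta = 0.01) keeps exploration alive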
                with tf.name_scope('a_loss'):
                    log_prob = self.normal_dist.log_prob(self.a_his)
                    self.exp_v = tf.reduce_mean(log_prob * td)
                    self.exp_v += 0.01 * tf.reduce_mean(self.normal_dist.entropy())  # encourage exploration
                    self.a_loss = -self.exp_v

                with tf.name_scope('choose_a'):  # use local params to choose actions
                    self.A = tf.clip_by_value(tf.squeeze(self.normal_dist.sample(1), axis=0),
                                              a_bound[0], a_bound[1])
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)  # local gradients
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

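            # sync protocol: "pull" copies the global parameters into this
            # local net; "push" applies this worker's gradients to the *global*
            # params through the shared optimizers (lock-free, Hogwild-style)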
            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, global_a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, global_c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = opt_a.apply_gradients(zip(self.a_grads, global_a_params))
                    self.update_c_op = opt_c.apply_gradients(zip(self.c_grads, global_c_params))

    def _build_net(self, n_a):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 100, tf.nn.relu, kernel_initializer=w_init, name='la')
            mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')  # action mean, in (-1, 1)
            sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')  # action stddev, > 0
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 60, tf.nn.relu, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local
        self.sess.run([self.update_a_op, self.update_c_op], feed_dict)  # apply local gradients to the global net

    def pull_global(self):  # run by a local
        self.sess.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):  # run by a local
        s = s[np.newaxis, :]
        return self.sess.run(self.A, {self.s: s})[0]  # shape (n_a,)


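# Each Worker runs in its own thread with a private environment and a private
# local ACNet; all workers share one tf.Session and the two global optimizers.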
class Worker(object):
    def __init__(self, env, name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params, g_c_params):
        self.env = env
        self.sess = sess
        self.name = name
        self.AC = ACNet(name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params, g_c_params)

    def work(self, update_iter, max_ep_step, gamma, coord):
        total_step = 1
        buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
        while not coord.should_stop() and GLOBAL_EP.eval(self.sess) < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(max_ep_step):
                if self.name == 'W_0':  # render only the first worker
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                r /= 10  # scale Pendulum's reward (roughly [-16, 0]) down
                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                buffer_s_.append(s_)

                if total_step % update_iter == 0 or done:  # update global net, then sync local net
                    buffer_s, buffer_a, buffer_r, buffer_s_ = (
                        np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_r), np.vstack(buffer_s_))

                    # one-step TD target: r + gamma * V(s'), with the bootstrap
                    # value zeroed for a terminal transition
                    v_next = self.sess.run(self.AC.v, {self.AC.s: buffer_s_})
                    if done:
                        v_next[-1, 0] = 0
                    v_target = buffer_r + gamma * v_next
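                    # Note: this is a TD(0) target per transition. Classic A3C
                    # instead bootstraps a discounted n-step return over the
                    # buffer; a minimal sketch of that variant (same buffers):
                    #   v_s_ = 0 if done else self.sess.run(
                    #       self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    #   v_target = []
                    #   for r in buffer_r[::-1]:       # accumulate backwards
                    #       v_s_ = r + gamma * v_s_
                    #       v_target.append(v_s_)
                    #   v_target.reverse()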

                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: v_target,
                    }
                    self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if ep_t == max_ep_step - 1:
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP.eval(self.sess),
                        "| Ep_r: %.2f" % ep_r,
                    )
                    self.sess.run(COUNT_GLOBAL_EP)  # bump the shared episode counter
                    break

if __name__ == "__main__":
    sess = tf.Session()

    with tf.device("/cpu:0"):
        GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
        COUNT_GLOBAL_EP = tf.assign_add(GLOBAL_EP, 1, name='step_ep')  # shared episode counter
        OPT_A = tf.train.RMSPropOptimizer(LR_A)
        OPT_C = tf.train.RMSPropOptimizer(LR_C)
        globalAC = ACNet(GLOBAL_NET_SCOPE, N_S, N_A)  # we only need its params
        workers = []
        # create one worker per CPU core, each with its own environment
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i  # worker name
            workers.append(
                Worker(
                    gym.make(GAME).unwrapped, i_name, N_S, N_A, A_BOUND, sess,
                    OPT_A, OPT_C, globalAC.a_params, globalAC.c_params
                ))

    coord = tf.train.Coordinator()
    sess.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, sess.graph)

    worker_threads = []
    for worker in workers:
        # pass args directly so each thread binds its own worker; a bare
        # `lambda: worker.work(...)` would late-bind the loop variable
        t = threading.Thread(target=worker.work,
                             args=(UPDATE_GLOBAL_ITER, MAX_EP_STEP, GAMMA, coord))
        t.start()
        worker_threads.append(t)
    coord.join(worker_threads)