
Commit 75c492b

committed: add new
1 parent db7ccb5 · commit 75c492b

File tree

5 files changed: +783 −3 lines changed

New file: +199 lines

@@ -0,0 +1,199 @@

"""
Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.

The Pendulum example. Version 1: converges reliably (separate actor and critic networks).

View more on MorvanZhou's tutorials (莫烦Python): https://morvanzhou.github.io/tutorials/

Using:
tensorflow 1.0
gym 0.8.0
"""

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil

np.random.seed(2)
tf.set_random_seed(2)  # reproducible

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = False
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 300
MAX_GLOBAL_EP = 800
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5
GAMMA = 0.9
LR_A = 0.001    # learning rate for actor
LR_C = 0.002    # learning rate for critic

env = gym.make(GAME)
env.seed(1)  # reproducible

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]


class ACNet(object):
    def __init__(self, scope, n_s, n_a,
                 a_bound=None, sess=None,
                 opt_a=None, opt_c=None, global_a_params=None, global_c_params=None):

        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                self._build_net(n_a)
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:   # local net, calculate losses
            self.sess = sess
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, n_a], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'R')

                mu, sigma, self.v = self._build_net(n_a)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))

                # scale mu to the action bound; add 1e-2 so sigma stays strictly positive
                self.mu, self.sigma = tf.squeeze(mu * a_bound[1]), tf.squeeze(sigma + 1e-2)
                self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
                with tf.name_scope('a_loss'):
                    log_prob = self.normal_dist.log_prob(self.a_his)
                    self.exp_v = tf.reduce_mean(log_prob * td)
                    self.exp_v += 0.01 * self.normal_dist.entropy()  # encourage exploration
                    self.a_loss = -self.exp_v
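                # The critic minimizes the squared TD error (v_target - v)^2; the actor maximizes
                # E[log pi(a|s) * td], a policy gradient that uses the TD error as an advantage
                # estimate. The 0.01 * entropy bonus favors a wider Gaussian and so discourages
                # the policy from collapsing to a deterministic action too early.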
                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(self.normal_dist.sample([1]), a_bound[0], a_bound[1])
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)  # get local gradients
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, global_a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, global_c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = opt_a.apply_gradients(zip(self.a_grads, global_a_params))
                    self.update_c_op = opt_c.apply_gradients(zip(self.c_grads, global_c_params))
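
            # A3C synchronization: a worker never trains its local parameters directly.
            # "push" applies the locally computed gradients to the global network through the
            # shared optimizers, and "pull" copies the updated global parameters back into the
            # local network, so each worker keeps acting with a recent copy of the global net.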

    def _build_net(self, n_a):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 100, tf.nn.relu, kernel_initializer=w_init, name='la')
            mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 60, tf.nn.relu, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local worker
        self.sess.run([self.update_a_op, self.update_c_op], feed_dict)  # local grads applied to global net

    def pull_global(self):  # run by a local worker
        self.sess.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):  # run by a local worker
        s = s[np.newaxis, :]
        return self.sess.run(self.A, {self.s: s})


class Worker(object):
    def __init__(self, env, name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params, g_c_params):
        self.env = env
        self.sess = sess
        self.name = name
        self.AC = ACNet(name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params, g_c_params)
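        # Each worker owns its own environment instance and its own local ACNet; all workers
        # share one tf.Session, the two global optimizers, and the global parameter lists.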

    def work(self, update_iter, max_ep_step, gamma, coord):
        total_step = 1
        buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
        while not coord.should_stop() and GLOBAL_EP.eval(self.sess) < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(max_ep_step):
                if self.name == 'W_0':
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                r /= 10     # scale the reward to a smaller range
                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                buffer_s_.append(s_)

                if total_step % update_iter == 0 or done:   # update global and assign to local net
                    buffer_s, buffer_a, buffer_r, buffer_s_ = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_r), np.vstack(buffer_s_)

                    v_next = self.sess.run(self.AC.v, {self.AC.s: buffer_s_})
                    if done: v_next[-1, 0] = 0   # terminal next state has zero value
                    v_target = buffer_r + gamma * v_next
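                    # One-step bootstrapped targets: v_target[t] = r[t] + gamma * V(s[t+1]),
                    # with the terminal next-state value forced to zero above. (Unlike the
                    # n-step returns of the original A3C paper, every transition here
                    # bootstraps from its own next state.)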

                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: v_target,
                    }
                    self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if ep_t == max_ep_step - 1:
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP.eval(self.sess),
                        "| Ep_r: %.2f" % ep_r,
                    )
                    self.sess.run(COUNT_GLOBAL_EP)
                    break


if __name__ == "__main__":
    sess = tf.Session()

    with tf.device("/cpu:0"):
        GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
        COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
        OPT_A = tf.train.RMSPropOptimizer(LR_A)
        OPT_C = tf.train.RMSPropOptimizer(LR_C)
        globalAC = ACNet(GLOBAL_NET_SCOPE, N_S, N_A)    # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i     # worker name
            workers.append(
                Worker(
                    gym.make(GAME).unwrapped, i_name, N_S, N_A, A_BOUND, sess,
                    OPT_A, OPT_C, globalAC.a_params, globalAC.c_params
                ))

    coord = tf.train.Coordinator()
    sess.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, sess.graph)

    worker_threads = []
    for worker in workers:
        job = lambda worker=worker: worker.work(UPDATE_GLOBAL_ITER, MAX_EP_STEP, GAMMA, coord)  # bind worker now to avoid late-binding in the closure
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    coord.join(worker_threads)

New file: +194 lines

@@ -0,0 +1,194 @@

"""
Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.

The Pendulum example. Version 2: convergence not guaranteed (shared network, combined loss).

View more on MorvanZhou's tutorials (莫烦Python): https://morvanzhou.github.io/tutorials/

Using:
tensorflow 1.0
gym 0.8.0
"""

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil

np.random.seed(2)
tf.set_random_seed(2)  # reproducible

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = False
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 300
MAX_GLOBAL_EP = 800
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5
GAMMA = 0.9
LR = 0.001      # shared learning rate (one optimizer for the combined actor-critic loss)

env = gym.make(GAME)
env.seed(1)  # reproducible

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]


class ACNet(object):
    def __init__(self, scope, n_s, n_a,
                 a_bound=None, sess=None,
                 opt=None, global_params=None):

        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                self._build_net(n_a)
                self.params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
        else:   # local net, calculate losses
            self.sess = sess
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, n_a], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'R')

                mu, sigma, self.v = self._build_net(n_a)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))

                # scale mu to the action bound; add 1e-2 so sigma stays strictly positive
                self.mu, self.sigma = tf.squeeze(mu * a_bound[1]), tf.squeeze(sigma + 1e-2)
                self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
                with tf.name_scope('a_loss'):
                    log_prob = self.normal_dist.log_prob(self.a_his)
                    self.exp_v = tf.reduce_mean(log_prob * td)
                    self.exp_v += 0.01 * self.normal_dist.entropy()  # encourage exploration
                    self.a_loss = -self.exp_v
                with tf.name_scope('total_loss'):
                    self.loss = self.a_loss + 0.5 * self.c_loss     # combining the losses makes convergence harder
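                # Version 2 trains one shared network with a single combined objective:
                # policy loss plus 0.5 * value loss, handled by a single RMSProp optimizer.
                # The 0.5 factor weights the value term; as the comment above and the file
                # docstring warn, this shared setup converges less reliably than version 1's
                # separate actor and critic networks.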
                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(self.normal_dist.sample([1]), a_bound[0], a_bound[1])

                with tf.name_scope('local_grad'):
                    self.params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
                    self.grads = tf.gradients(self.loss, self.params)   # get local gradients

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.params, global_params)]
                with tf.name_scope('push'):
                    self.update_op = opt.apply_gradients(zip(self.grads, global_params))

    def _build_net(self, n_a):
        w_init = tf.random_normal_initializer(0., .1)
        l = tf.layers.dense(self.s, 100, tf.nn.relu, kernel_initializer=w_init, name='l1')
        l = tf.layers.dense(l, 100, tf.nn.relu, kernel_initializer=w_init, name='l2')
        mu = tf.layers.dense(l, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
        sigma = tf.layers.dense(l, n_a, tf.nn.sigmoid, kernel_initializer=w_init, name='sigma')     # sigmoid keeps the variance from growing too large
        v = tf.layers.dense(l, 1, kernel_initializer=w_init, name='v')  # state value
        return mu, sigma, v
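
    # Unlike version 1, mu, sigma, and v all branch off the same two shared hidden layers,
    # and sigma uses a sigmoid so the policy's standard deviation stays within (0, 1).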

    def update_global(self, feed_dict):  # run by a local worker
        self.sess.run([self.update_op], feed_dict)  # local grads applied to global net

    def pull_global(self):  # run by a local worker
        self.sess.run([self.pull_params_op])

    def choose_action(self, s):  # run by a local worker
        s = s[np.newaxis, :]
        return self.sess.run(self.A, {self.s: s})


class Worker(object):
    def __init__(self, env, name, n_s, n_a, a_bound, sess, opt, g_params):
        self.env = env
        self.sess = sess
        self.name = name
        self.AC = ACNet(name, n_s, n_a, a_bound, sess, opt, g_params)

    def work(self, update_iter, max_ep_step, gamma, coord):
        total_step = 1
        buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []

        while not coord.should_stop() and GLOBAL_EP.eval(self.sess) < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(max_ep_step):
                if self.name == 'W_0':
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                r /= 10     # scale the reward to a smaller range
                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                buffer_s_.append(s_)

                if total_step % update_iter == 0 or done:   # update global and assign to local net
                    buffer_s, buffer_a, buffer_r, buffer_s_ = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_r), np.vstack(buffer_s_)

                    v_next = self.sess.run(self.AC.v, {self.AC.s: buffer_s_})
                    if done: v_next[-1, 0] = 0   # terminal next state has zero value
                    v_target = buffer_r + gamma * v_next

                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: v_target,
                    }
                    self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if ep_t == max_ep_step - 1:
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP.eval(self.sess),
                        "| Ep_r: %.2f" % ep_r,
                    )
                    self.sess.run(COUNT_GLOBAL_EP)
                    break


if __name__ == "__main__":
    sess = tf.Session()

    with tf.device("/cpu:0"):
        GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
        COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
        OPT = tf.train.RMSPropOptimizer(LR)
        globalAC = ACNet(GLOBAL_NET_SCOPE, N_S, N_A)    # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i     # worker name
            workers.append(
                Worker(
                    gym.make(GAME).unwrapped, i_name, N_S, N_A, A_BOUND, sess,
                    OPT, globalAC.params
                ))

    coord = tf.train.Coordinator()
    sess.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, sess.graph)

    worker_threads = []
    for worker in workers:
        job = lambda worker=worker: worker.work(UPDATE_GLOBAL_ITER, MAX_EP_STEP, GAMMA, coord)  # bind worker now to avoid late-binding in the closure
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    coord.join(worker_threads)

0 commit comments
