Commit 9352eec ("improve"), parent 29c4c9a

7 files changed: +557 / -93 lines
@@ -0,0 +1,230 @@ (new file)

"""
Asynchronous Advantage Actor-Critic (A3C) with a continuous action space, Reinforcement Learning.

The Pendulum example. Convergence is promised, but this is a difficult environment and this code hardly converges.

View more on [莫烦Python]: https://morvanzhou.github.io/tutorials/

Using:
tensorflow 1.0
gym 0.8.0
"""

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 400
MAX_GLOBAL_EP = 800
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001    # learning rate for actor
LR_C = 0.001     # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0

env = gym.make(GAME)

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]


class ACNet(object):
    def __init__(self, scope, globalAC=None):

        if scope == GLOBAL_NET_SCOPE:   # get the global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self._build_net(N_A)
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:   # local net, calculate losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                mu, sigma, self.v = self._build_net(N_A)

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_losses = tf.square(td)   # shape (None, 1); use this to get the sum of gradients over the batch
                    self.c_loss = tf.reduce_mean(self.c_losses)

                with tf.name_scope('wrap_a_out'):
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-4

                normal_dist = tf.contrib.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_losses = -self.exp_v      # shape (None, 1); use this to get the sum of gradients over the batch
                    self.a_loss = tf.reduce_mean(self.a_losses)

                with tf.name_scope('choose_a'):  # use local params to choose an action
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_losses, self.a_params)   # using the losses gives the accumulated sum of gradients
                    self.c_grads = tf.gradients(self.c_losses, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self, n_a):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('critic'):   # only the critic controls the RNN update
            cell_size = 32
            s = tf.expand_dims(self.s, axis=1,
                               name='timely_input')  # [time_step, feature] => [time_step, batch, feature]
            rnn_cell = tf.contrib.rnn.BasicRNNCell(cell_size)
            self.init_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
            outputs, final_state = tf.nn.dynamic_rnn(
                cell=rnn_cell, inputs=s, initial_state=self.init_state, time_major=True)
            cell_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs')  # joined state representation
            l_c = tf.layers.dense(cell_out, 50, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value

        with tf.variable_scope('actor'):    # the state representation is based on the critic
            cell_out = tf.stop_gradient(cell_out, name='c_cell_out')   # from what the critic thinks it is
            l_a = tf.layers.dense(cell_out, 80, tf.nn.relu6, kernel_initializer=w_init, name='la')
            mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local
        state, _, _ = SESS.run(
            [self.init_state, self.update_a_op, self.update_c_op],
            feed_dict)  # local grads are applied to the global net
        return state

    def pull_global(self):  # run by a local
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):  # run by a local
        s = s[np.newaxis, :]
        return SESS.run(self.A, {self.s: s})[0]


class Worker(object):
    def __init__(self, name, globalAC):
        self.env = gym.make(GAME).unwrapped
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                done = True if ep_t == MAX_EP_STEP - 1 else False
                r /= 10     # normalize reward

                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)

                if total_step % UPDATE_GLOBAL_ITER == 0 or done:   # update the global net and assign to the local net
                    if done:
                        v_s_ = 0   # terminal
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    buffer_v_target = []
                    for r in buffer_r[::-1]:    # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()

                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)

                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                        # use the zero initial state if this is the beginning of the episode
                    }

                    if ep_t > UPDATE_GLOBAL_ITER - 1:   # not the beginning of this episode
                        feed_dict[self.AC.init_state] = state
                    state = self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record the running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                    )
                    GLOBAL_EP += 1
                    break


if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i   # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, SESS.graph)

    worker_threads = []
    for worker in workers:
        job = lambda: worker.work()
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)

    plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()
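The value targets fed to the `v_target` placeholder above are built in `Worker.work()` by bootstrapping from the critic's estimate of the state after the last buffered step and then discounting backwards through the reward buffer. A minimal standalone sketch of that computation; the helper name `discounted_targets` is chosen here for illustration and is not part of the repo:

import numpy as np

def discounted_targets(rewards, v_last, gamma=0.9):
    """Bootstrapped targets: v_t = r_t + gamma * v_{t+1}, seeded with the critic's estimate v_last."""
    targets = []
    v = v_last
    for r in reversed(rewards):         # walk the reward buffer backwards, as in work()
        v = r + gamma * v
        targets.append(v)
    targets.reverse()
    return np.vstack(targets)           # shape (len(rewards), 1), matching the v_target placeholder

# Example: three buffered rewards and a critic estimate of 0.5 for the state after the last step.
print(discounted_targets([0.1, -0.2, 0.0], v_last=0.5))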

Reinforcement_learning_TUT/10_A3C/A3C_continuous_action.py (+5 / -6)

@@ -32,6 +32,7 @@
 LR_A = 0.001    # learning rate for actor
 LR_C = 0.002    # learning rate for critic
 GLOBAL_RUNNING_R = []
+GLOBAL_EP = 0

 env = gym.make(GAME)

@@ -120,9 +121,10 @@ def __init__(self, name, globalAC):
         self.AC = ACNet(name, globalAC)

     def work(self):
+        global GLOBAL_RUNNING_R, GLOBAL_EP
         total_step = 1
         buffer_s, buffer_a, buffer_r = [], [], []
-        while not COORD.should_stop() and GLOBAL_EP.eval(SESS) < MAX_GLOBAL_EP:
+        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
             s = self.env.reset()
             ep_r = 0
             for ep_t in range(MAX_EP_STEP):
@@ -162,25 +164,22 @@ def work(self):
                 s = s_
                 total_step += 1
                 if done:
-                    global GLOBAL_RUNNING_R
                     if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                         GLOBAL_RUNNING_R.append(ep_r)
                     else:
                         GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                     print(
                         self.name,
-                        "Ep:", GLOBAL_EP.eval(SESS),
+                        "Ep:", GLOBAL_EP,
                         "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                     )
-                    SESS.run(COUNT_GLOBAL_EP)
+                    GLOBAL_EP += 1
                     break

 if __name__ == "__main__":
     SESS = tf.Session()

     with tf.device("/cpu:0"):
-        GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
-        COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
         OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
         OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
         GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)     # we only need its params
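The change above (and the matching one below) drops the `tf.Variable` episode counter and its `tf.assign` op in favour of a plain Python int that every worker thread increments directly. `GLOBAL_EP += 1` is a read-modify-write, so increments can in principle be lost when threads interleave; for a progress counter that is harmless, but if an exact count matters, a lock is a small addition. A minimal sketch, assuming a module-level lock; the names `EP_LOCK` and `count_episode` are illustrative and not part of the commit:

import threading

GLOBAL_EP = 0
EP_LOCK = threading.Lock()              # shared by all worker threads

def count_episode():
    """Lock-guarded version of the bare GLOBAL_EP += 1 done in Worker.work()."""
    global GLOBAL_EP
    with EP_LOCK:                       # serialize the read-modify-write
        GLOBAL_EP += 1
        return GLOBAL_EP                # consistent snapshot for the progress print

threads = [threading.Thread(target=count_episode) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(GLOBAL_EP)                        # always 8 with the lock in place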

Reinforcement_learning_TUT/10_A3C/A3C_discrete_action.py (+7 / -8)

@@ -32,6 +32,7 @@
 LR_A = 0.001    # learning rate for actor
 LR_C = 0.001    # learning rate for critic
 GLOBAL_RUNNING_R = []
+GLOBAL_EP = 0

 env = gym.make(GAME)

@@ -113,14 +114,15 @@ def __init__(self, name, globalAC):
         self.AC = ACNet(name, globalAC)

     def work(self):
+        global GLOBAL_RUNNING_R, GLOBAL_EP
         total_step = 1
         buffer_s, buffer_a, buffer_r = [], [], []
-        while not COORD.should_stop() and GLOBAL_EP.eval(SESS) < MAX_GLOBAL_EP:
+        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
             s = self.env.reset()
             ep_r = 0
             while True:
-                # if self.name == 'W_0':
-                #     self.env.render()
+                if self.name == 'W_0':
+                    self.env.render()
                 a = self.AC.choose_action(s)
                 s_, r, done, info = self.env.step(a)
                 if done: r = -5
@@ -154,25 +156,22 @@ def work(self):
                 s = s_
                 total_step += 1
                 if done:
-                    global GLOBAL_RUNNING_R
                     if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                         GLOBAL_RUNNING_R.append(ep_r)
                     else:
                         GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)
                     print(
                         self.name,
-                        "Ep:", GLOBAL_EP.eval(SESS),
+                        "Ep:", GLOBAL_EP,
                         "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                     )
-                    SESS.run(COUNT_GLOBAL_EP)
+                    GLOBAL_EP += 1
                     break

 if __name__ == "__main__":
     SESS = tf.Session()

     with tf.device("/cpu:0"):
-        GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
-        COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
         OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
         OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
         GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)     # we only need its params
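Both scripts report an exponentially smoothed episode reward rather than the raw `ep_r`: the continuous script mixes in 10% of each new episode (0.9/0.1) and the discrete script smooths harder (0.99/0.01). A minimal sketch of that running average; the function name and the example returns are illustrative only:

def running_reward(prev, ep_r, alpha=0.9):
    """Exponential moving average used for GLOBAL_RUNNING_R: keep `alpha` of the history."""
    return alpha * prev + (1 - alpha) * ep_r

history = []
for ep_r in [-1200.0, -900.0, -650.0]:        # example raw episode returns from Pendulum
    prev = history[-1] if history else ep_r   # the first episode seeds the average, as in the scripts
    history.append(running_reward(prev, ep_r))
print(history)                                # [-1200.0, -1170.0, -1118.0]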
