
Commit 7eb220d

add new
1 parent 75c492b commit 7eb220d

File tree

5 files changed: +341 -287 lines changed


Reinforcement_learning_TUT/10_A3C/A3C_v1.py → Reinforcement_learning_TUT/10_A3C/A3C_continuous_action.py (renamed)

+40 -30
@@ -1,5 +1,5 @@
 """
-Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.
+Asynchronous Advantage Actor Critic (A3C) with continuous action space, Reinforcement Learning.
 
 The Pendulum example. Version 1: convergence promised
 
@@ -22,19 +22,19 @@
 tf.set_random_seed(2)  # reproducible
 
 GAME = 'Pendulum-v0'
-OUTPUT_GRAPH = False
+OUTPUT_GRAPH = True
 LOG_DIR = './log'
 N_WORKERS = multiprocessing.cpu_count()
 MAX_EP_STEP = 300
-MAX_GLOBAL_EP = 800
+MAX_GLOBAL_EP = 1000
 GLOBAL_NET_SCOPE = 'Global_Net'
 UPDATE_GLOBAL_ITER = 5
 GAMMA = 0.9
+ENTROPY_BETA = 0.01
 LR_A = 0.001    # learning rate for actor
-LR_C = 0.002    # learning rate for critic
+LR_C = 0.001    # learning rate for critic
 
 env = gym.make(GAME)
-env.seed(1)  # reproducible
 
 N_S = env.observation_space.shape[0]
 N_A = env.action_space.shape[0]
@@ -57,23 +57,29 @@ def __init__(self, scope, n_s, n_a,
         with tf.variable_scope(scope):
             self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
             self.a_his = tf.placeholder(tf.float32, [None, n_a], 'A')
-            self.v_target = tf.placeholder(tf.float32, [None, 1], 'R')
+            self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
 
             mu, sigma, self.v = self._build_net(n_a)
 
             td = tf.subtract(self.v_target, self.v, name='TD_error')
             with tf.name_scope('c_loss'):
-                self.c_loss = tf.reduce_mean(tf.square(td))
-            self.mu, self.sigma = tf.squeeze(mu * a_bound[1]), tf.squeeze(sigma + 1e-2)
-            self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
+                self.c_loss = tf.reduce_sum(tf.square(td))
+
+            with tf.name_scope('wrap_a_out'):
+                mu, sigma = mu * a_bound[1], sigma*2 + 1e-2
+                self.test = sigma[0]
+
+            normal_dist = tf.contrib.distributions.Normal(mu, sigma)
+
             with tf.name_scope('a_loss'):
-                log_prob = self.normal_dist.log_prob(self.a_his)
-                self.exp_v = tf.reduce_mean(log_prob * td)
-                self.exp_v += 0.01*self.normal_dist.entropy()  # encourage exploration
+                log_prob = normal_dist.log_prob(self.a_his)
+                exp_v = log_prob * td
+                entropy = normal_dist.entropy()  # encourage exploration
+                self.exp_v = tf.reduce_sum(ENTROPY_BETA * entropy + exp_v)
                 self.a_loss = -self.exp_v
 
             with tf.name_scope('choose_a'):  # use local params to choose action
-                self.A = tf.clip_by_value(self.normal_dist.sample([1]), a_bound[0], a_bound[1])
+                self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), a_bound[0], a_bound[1])
             with tf.name_scope('local_grad'):
                 self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                 self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
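The key change in this hunk is the move from a squeezed scalar Gaussian to a per-dimension policy with an explicit entropy bonus, and from mean-reduced to sum-reduced losses. Below is a minimal standalone sketch of that head in the same TF1 style; the state/action sizes, layer widths and names are illustrative assumptions for a Pendulum-like task, not the repository's exact code.

```python
import tensorflow as tf

# Illustrative sketch only: continuous-action A3C head for a Pendulum-like
# setting (3-dim state, 1-dim action in [-2, 2]); names are hypothetical.
ENTROPY_BETA = 0.01
A_BOUND = [-2., 2.]

s = tf.placeholder(tf.float32, [None, 3], 'S')
a_his = tf.placeholder(tf.float32, [None, 1], 'A')
v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

w_init = tf.random_normal_initializer(0., .1)
l_a = tf.layers.dense(s, 50, tf.nn.relu, kernel_initializer=w_init)
mu = tf.layers.dense(l_a, 1, tf.nn.tanh, kernel_initializer=w_init)        # mean in (-1, 1)
sigma = tf.layers.dense(l_a, 1, tf.nn.sigmoid, kernel_initializer=w_init)  # std in (0, 1)
l_c = tf.layers.dense(s, 50, tf.nn.relu, kernel_initializer=w_init)
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init)                     # state value

td = v_target - v
c_loss = tf.reduce_sum(tf.square(td))                   # critic: squared TD error

mu, sigma = mu * A_BOUND[1], sigma * 2 + 1e-2           # rescale mean to action range, keep sigma > 0
normal_dist = tf.contrib.distributions.Normal(mu, sigma)

log_prob = normal_dist.log_prob(a_his)
# In the file above, actor and critic gradients are taken w.r.t. separate
# parameter collections; with shared variables you would stop gradients on td.
exp_v = log_prob * tf.stop_gradient(td)
a_loss = -tf.reduce_sum(ENTROPY_BETA * normal_dist.entropy() + exp_v)

# Sample one clipped action per state.
A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
```

Reducing with a sum instead of a mean only rescales the gradients by the (small, roughly constant) buffer length, which the learning rates can absorb.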
@@ -91,11 +97,11 @@ def __init__(self, scope, n_s, n_a,
     def _build_net(self, n_a):
         w_init = tf.random_normal_initializer(0., .1)
         with tf.variable_scope('actor'):
-            l_a = tf.layers.dense(self.s, 100, tf.nn.relu, kernel_initializer=w_init, name='la')
+            l_a = tf.layers.dense(self.s, 50, tf.nn.relu, kernel_initializer=w_init, name='la')
             mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
-            sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
+            sigma = tf.layers.dense(l_a, n_a, tf.nn.sigmoid, kernel_initializer=w_init, name='sigma')
         with tf.variable_scope('critic'):
-            l_c = tf.layers.dense(self.s, 60, tf.nn.relu, kernel_initializer=w_init, name='lc')
+            l_c = tf.layers.dense(self.s, 50, tf.nn.relu, kernel_initializer=w_init, name='lc')
             v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
         return mu, sigma, v
 
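One consequence of switching the sigma head from softplus to sigmoid, combined with the `sigma*2 + 1e-2` wrapping in the previous hunk, is that the standard deviation is now bounded in roughly (0.01, 2.01) instead of being unbounded above. A quick numeric check with made-up pre-activation values:

```python
import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

x = np.array([-5., 0., 5.])      # hypothetical pre-activation outputs
print(softplus(x) + 1e-2)        # old head: ~[0.017, 0.703, 5.017], unbounded above
print(sigmoid(x) * 2 + 1e-2)     # new head: ~[0.023, 1.010, 1.997], stays in (0.01, 2.01)
```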
@@ -107,7 +113,7 @@ def pull_global(self):   # run by a local
 
     def choose_action(self, s):  # run by a local
         s = s[np.newaxis, :]
-        return self.sess.run(self.A, {self.s: s})
+        return self.sess.run(self.A, {self.s: s})[0]
 
 
 class Worker(object):
@@ -119,7 +125,7 @@ def __init__(self, env, name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params,
 
     def work(self, update_iter, max_ep_step, gamma, coord):
         total_step = 1
-        buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
+        buffer_s, buffer_a, buffer_r = [], [], []
         while not coord.should_stop() and GLOBAL_EP.eval(self.sess) < MAX_GLOBAL_EP:
             s = self.env.reset()
             ep_r = 0
@@ -128,27 +134,31 @@ def work(self, update_iter, max_ep_step, gamma, coord):
                 self.env.render()
                 a = self.AC.choose_action(s)
                 s_, r, done, info = self.env.step(a)
-                r /= 10
+                r /= 10     # normalize reward
                 ep_r += r
                 buffer_s.append(s)
                 buffer_a.append(a)
                 buffer_r.append(r)
-                buffer_s_.append(s_)
 
                 if total_step % update_iter == 0 or done:   # update global and assign to local net
-                    buffer_s, buffer_a, buffer_r, buffer_s_ = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_r), np.vstack(buffer_s_)
-
-                    v_next = self.sess.run(self.AC.v, {self.AC.s: buffer_s_})
-                    if done: v_next[-1, 0] = 0
-                    v_target = buffer_r + gamma * v_next
-
+                    if done:
+                        v_s_ = 0  # terminal
+                    else:
+                        v_s_ = self.sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
+                    buffer_v_target = []
+                    for r in buffer_r[::-1]:    # reverse buffer r
+                        v_s_ = r + gamma * v_s_
+                        buffer_v_target.append(v_s_)
+                    buffer_v_target.reverse()
+
+                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                     feed_dict = {
                         self.AC.s: buffer_s,
                         self.AC.a_his: buffer_a,
-                        self.AC.v_target: v_target,
+                        self.AC.v_target: buffer_v_target,
                     }
                     self.AC.update_global(feed_dict)
-                    buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
+                    buffer_s, buffer_a, buffer_r = [], [], []
                     self.AC.pull_global()
 
                 s = s_
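This hunk swaps the one-step targets `r + gamma * V(s')` built from `buffer_s_` for discounted n-step returns bootstrapped from the value of the state after the last buffered step. A small standalone version of that backward pass, with made-up numbers:

```python
import numpy as np

def discounted_v_targets(buffer_r, v_s_, gamma=0.9):
    """Work backwards from the bootstrap value of the state after the
    last buffered step (0 if that state was terminal)."""
    targets = []
    for r in buffer_r[::-1]:      # newest reward first
        v_s_ = r + gamma * v_s_
        targets.append(v_s_)
    targets.reverse()             # restore chronological order
    return np.vstack(targets)

# Hypothetical 3-step buffer with a bootstrap value of 1.0:
print(discounted_v_targets([0.1, 0.2, 0.3], v_s_=1.0))
# [[1.252], [1.28], [1.2]]
```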
@@ -168,8 +178,8 @@ def work(self, update_iter, max_ep_step, gamma, coord):
 with tf.device("/cpu:0"):
     GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
     COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
-    OPT_A = tf.train.RMSPropOptimizer(LR_A)
-    OPT_C = tf.train.RMSPropOptimizer(LR_C)
+    OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
+    OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
     globalAC = ACNet(GLOBAL_NET_SCOPE, N_S, N_A)  # we only need its params
     workers = []
     # Create worker

Reinforcement_learning_TUT/10_A3C/A3C_v2.py

-194
This file was deleted.
