Commit 7eb220d

add new

committed Mar 29, 2017
1 parent 75c492b · commit 7eb220d
File tree: 5 files changed (+341, -287 lines)

Reinforcement_learning_TUT/10_A3C/A3C_v1.py → Reinforcement_learning_TUT/10_A3C/A3C_continuous_action.py (renamed: +40, -30)
@@ -1,5 +1,5 @@
 """
-Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.
+Asynchronous Advantage Actor Critic (A3C) with continuous action space, Reinforcement Learning.

 The Pendulum example. Version 1: convergence promised

@@ -22,19 +22,19 @@
 tf.set_random_seed(2)  # reproducible

 GAME = 'Pendulum-v0'
-OUTPUT_GRAPH = False
+OUTPUT_GRAPH = True
 LOG_DIR = './log'
 N_WORKERS = multiprocessing.cpu_count()
 MAX_EP_STEP = 300
-MAX_GLOBAL_EP = 800
+MAX_GLOBAL_EP = 1000
 GLOBAL_NET_SCOPE = 'Global_Net'
 UPDATE_GLOBAL_ITER = 5
 GAMMA = 0.9
+ENTROPY_BETA = 0.01
 LR_A = 0.001    # learning rate for actor
-LR_C = 0.002    # learning rate for critic
+LR_C = 0.001    # learning rate for critic

 env = gym.make(GAME)
-env.seed(1)  # reproducible

 N_S = env.observation_space.shape[0]
 N_A = env.action_space.shape[0]
@@ -57,23 +57,29 @@ def __init__(self, scope, n_s, n_a,
             with tf.variable_scope(scope):
                 self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
                 self.a_his = tf.placeholder(tf.float32, [None, n_a], 'A')
-                self.v_target = tf.placeholder(tf.float32, [None, 1], 'R')
+                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                 mu, sigma, self.v = self._build_net(n_a)

                 td = tf.subtract(self.v_target, self.v, name='TD_error')
                 with tf.name_scope('c_loss'):
-                    self.c_loss = tf.reduce_mean(tf.square(td))
-                self.mu, self.sigma = tf.squeeze(mu * a_bound[1]), tf.squeeze(sigma + 1e-2)
-                self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
+                    self.c_loss = tf.reduce_sum(tf.square(td))
+
+                with tf.name_scope('wrap_a_out'):
+                    mu, sigma = mu * a_bound[1], sigma*2 + 1e-2
+                    self.test = sigma[0]
+
+                normal_dist = tf.contrib.distributions.Normal(mu, sigma)
+
                 with tf.name_scope('a_loss'):
-                    log_prob = self.normal_dist.log_prob(self.a_his)
-                    self.exp_v = tf.reduce_mean(log_prob * td)
-                    self.exp_v += 0.01*self.normal_dist.entropy()  # encourage exploration
+                    log_prob = normal_dist.log_prob(self.a_his)
+                    exp_v = log_prob * td
+                    entropy = normal_dist.entropy()  # encourage exploration
+                    self.exp_v = tf.reduce_sum(ENTROPY_BETA * entropy + exp_v)
                     self.a_loss = -self.exp_v

                 with tf.name_scope('choose_a'):  # use local params to choose action
-                    self.A = tf.clip_by_value(self.normal_dist.sample([1]), a_bound[0], a_bound[1])
+                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), a_bound[0], a_bound[1])
                 with tf.name_scope('local_grad'):
                     self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                     self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
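For reference, the quantities this hunk wires into the graph can be written out by hand: the Gaussian log-probability of a stored action, the distribution's entropy, and the entropy-regularized objective ENTROPY_BETA * entropy + log_prob * td. A small standalone numpy sketch with toy numbers (an illustration, not part of the commit):

import numpy as np

def gaussian_log_prob(a, mu, sigma):
    # log N(a | mu, sigma^2), the quantity normal_dist.log_prob(self.a_his) evaluates per action
    return -0.5 * np.log(2 * np.pi * sigma ** 2) - (a - mu) ** 2 / (2 * sigma ** 2)

def gaussian_entropy(sigma):
    # 0.5 * log(2 * pi * e * sigma^2), the quantity normal_dist.entropy() evaluates
    return 0.5 * (np.log(2 * np.pi * sigma ** 2) + 1.0)

# toy numbers: one buffered action, the policy's output, and a TD error
a, mu, sigma, td, ENTROPY_BETA = 0.5, 0.2, 1.0, 0.3, 0.01

exp_v = gaussian_log_prob(a, mu, sigma) * td              # policy-gradient term
objective = ENTROPY_BETA * gaussian_entropy(sigma) + exp_v
a_loss = -objective                                       # what the optimizer minimizes
print(a_loss)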
@@ -91,11 +97,11 @@ def __init__(self, scope, n_s, n_a,
     def _build_net(self, n_a):
         w_init = tf.random_normal_initializer(0., .1)
         with tf.variable_scope('actor'):
-            l_a = tf.layers.dense(self.s, 100, tf.nn.relu, kernel_initializer=w_init, name='la')
+            l_a = tf.layers.dense(self.s, 50, tf.nn.relu, kernel_initializer=w_init, name='la')
             mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
-            sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
+            sigma = tf.layers.dense(l_a, n_a, tf.nn.sigmoid, kernel_initializer=w_init, name='sigma')
         with tf.variable_scope('critic'):
-            l_c = tf.layers.dense(self.s, 60, tf.nn.relu, kernel_initializer=w_init, name='lc')
+            l_c = tf.layers.dense(self.s, 50, tf.nn.relu, kernel_initializer=w_init, name='lc')
             v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
         return mu, sigma, v

@@ -107,7 +113,7 @@ def pull_global(self):  # run by a local

     def choose_action(self, s):  # run by a local
         s = s[np.newaxis, :]
-        return self.sess.run(self.A, {self.s: s})
+        return self.sess.run(self.A, {self.s: s})[0]


 class Worker(object):
@@ -119,7 +125,7 @@ def __init__(self, env, name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params,

     def work(self, update_iter, max_ep_step, gamma, coord):
         total_step = 1
-        buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
+        buffer_s, buffer_a, buffer_r = [], [], []
         while not coord.should_stop() and GLOBAL_EP.eval(self.sess) < MAX_GLOBAL_EP:
             s = self.env.reset()
             ep_r = 0
@@ -128,27 +134,31 @@ def work(self, update_iter, max_ep_step, gamma, coord):
                     self.env.render()
                 a = self.AC.choose_action(s)
                 s_, r, done, info = self.env.step(a)
-                r /= 10
+                r /= 10     # normalize reward
                 ep_r += r
                 buffer_s.append(s)
                 buffer_a.append(a)
                 buffer_r.append(r)
-                buffer_s_.append(s_)

                 if total_step % update_iter == 0 or done:   # update global and assign to local net
-                    buffer_s, buffer_a, buffer_r, buffer_s_ = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_r), np.vstack(buffer_s_)
-
-                    v_next = self.sess.run(self.AC.v, {self.AC.s: buffer_s_})
-                    if done: v_next[-1, 0] = 0
-                    v_target = buffer_r + gamma * v_next
-
+                    if done:
+                        v_s_ = 0   # terminal
+                    else:
+                        v_s_ = self.sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
+                    buffer_v_target = []
+                    for r in buffer_r[::-1]:    # reverse buffer r
+                        v_s_ = r + gamma * v_s_
+                        buffer_v_target.append(v_s_)
+                    buffer_v_target.reverse()
+
+                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                     feed_dict = {
                         self.AC.s: buffer_s,
                         self.AC.a_his: buffer_a,
-                        self.AC.v_target: v_target,
+                        self.AC.v_target: buffer_v_target,
                     }
                     self.AC.update_global(feed_dict)
-                    buffer_s, buffer_a, buffer_r, buffer_s_ = [], [], [], []
+                    buffer_s, buffer_a, buffer_r = [], [], []
                     self.AC.pull_global()

                 s = s_
@@ -168,8 +178,8 @@ def work(self, update_iter, max_ep_step, gamma, coord):
     with tf.device("/cpu:0"):
         GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
         COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
-        OPT_A = tf.train.RMSPropOptimizer(LR_A)
-        OPT_C = tf.train.RMSPropOptimizer(LR_C)
+        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
+        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
         globalAC = ACNet(GLOBAL_NET_SCOPE, N_S, N_A)   # we only need its params
         workers = []
         # Create worker
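The rewritten update in work() (hunk @@ -128,27 +134,31 @@ above) replaces the one-step TD targets with discounted n-step returns, built by walking the buffered rewards backwards from a bootstrap value. A standalone numpy sketch of that target computation, with made-up rewards and bootstrap value (an illustration, not the repo's code):

import numpy as np

def discounted_targets(rewards, v_bootstrap, gamma=0.9):
    # walk the reward buffer backwards, accumulating gamma-discounted returns
    v_s_ = v_bootstrap            # value of the state after the last step (0 if terminal)
    targets = []
    for r in reversed(rewards):   # same reversed loop as in the diff above
        v_s_ = r + gamma * v_s_
        targets.append(v_s_)
    targets.reverse()
    return np.vstack(targets)     # shape (len(rewards), 1), fed in as v_target

# example: three buffered rewards and a bootstrap value of 1.0
print(discounted_targets([0.1, -0.2, 0.3], v_bootstrap=1.0))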

Reinforcement_learning_TUT/10_A3C/A3C_v2.py (-194)
This file was deleted.
(New file added by this commit, +235 lines; its name is not shown here.)
@@ -0,0 +1,235 @@
+"""
+Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.
+
+The BipedalWalker example.
+
+View more on [莫烦Python] : https://morvanzhou.github.io/tutorials/
+
+Using:
+tensorflow 1.0
+gym 0.8.0
+"""
+
+import multiprocessing
+import threading
+import tensorflow as tf
+import numpy as np
+import gym
+import os
+import shutil
+
+np.random.seed(2)
+tf.set_random_seed(2)  # reproducible
+
+GAME = 'BipedalWalker-v2'
+# GAME = 'Pendulum-v0'
+OUTPUT_GRAPH = True
+LOG_DIR = './log'
+N_WORKERS = multiprocessing.cpu_count() + 2
+MAX_GLOBAL_EP = 10000
+GLOBAL_NET_SCOPE = 'Global_Net'
+SHARED_LAYER = True
+UPDATE_GLOBAL_ITER = 5
+GAMMA = 0.99
+ENTROPY_BETA = 0.01
+LR_A = 0.0001    # learning rate for actor
+LR_C = 0.0001    # learning rate for critic
+
+env = gym.make(GAME)
+env.seed(1)  # reproducible
+
+N_S = env.observation_space.shape[0]
+N_A = env.action_space.shape[0]
+A_BOUND = [env.action_space.low, env.action_space.high]
+
+
+class ACNet(object):
+    def __init__(self, scope, n_s, n_a,
+                 a_bound=None, sess=None,
+                 opt_a=None, opt_c=None, global_a_params=None, global_c_params=None):
+
+        if scope == GLOBAL_NET_SCOPE:   # get global network
+            with tf.variable_scope(scope):
+                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
+                self._build_net(n_a, SHARED_LAYER)
+                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
+                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
+        else:   # local net, calculate losses
+            self.sess = sess
+            with tf.variable_scope(scope):
+                self.s = tf.placeholder(tf.float32, [None, n_s], 'S')
+                self.a_his = tf.placeholder(tf.float32, [None, n_a], 'A')
+                # self.a_his = tf.squeeze(self.a_his)
+                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
+
+                mu, sigma, self.v = self._build_net(n_a, SHARED_LAYER)
+
+                td = tf.subtract(self.v_target, self.v, name='TD_error')
+                with tf.name_scope('c_loss'):
+                    self.c_loss = tf.reduce_sum(tf.square(td))
+
+                with tf.name_scope('wrap_a_out'):
+                    mu, sigma = mu * a_bound[1], sigma + 1e-2
+                    self.test = sigma[0]
+
+                # the distribution result has been flatten
+                normal_dist = tf.contrib.distributions.Normal(mu, sigma)
+
+                with tf.name_scope('a_loss'):
+                    log_prob = normal_dist.log_prob(self.a_his)
+                    exp_v = log_prob * td
+                    entropy = normal_dist.entropy()  # encourage exploration
+                    self.exp_v = tf.reduce_sum(ENTROPY_BETA*entropy+exp_v)
+                    self.a_loss = -self.exp_v
+
+                with tf.name_scope('choose_a'):  # use local params to choose action
+                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), a_bound[0], a_bound[1])
+
+                with tf.name_scope('local_grad'):
+                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
+                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
+                    self.a_grads = tf.gradients(self.a_loss, self.a_params)  # get local gradients
+                    self.c_grads = tf.gradients(self.c_loss, self.c_params)
+
+            with tf.name_scope('sync'):
+                with tf.name_scope('pull'):
+                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, global_a_params)]
+                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, global_c_params)]
+                with tf.name_scope('push'):
+                    self.update_a_op = opt_a.apply_gradients(zip(self.a_grads, global_a_params))
+                    self.update_c_op = opt_c.apply_gradients(zip(self.c_grads, global_c_params))
+
+    def _build_net(self, n_a, shared):
+        w_init = tf.random_normal_initializer(0., .01)
+        if shared:
+            with tf.variable_scope('actor'):
+                la = tf.layers.dense(self.s, 200, tf.nn.relu, kernel_initializer=w_init, name='share')
+                la = tf.layers.dense(la, 20, tf.nn.relu, kernel_initializer=w_init, name='la')
+                mu = tf.layers.dense(la, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
+                sigma = tf.layers.dense(la, n_a, tf.nn.sigmoid, kernel_initializer=w_init, name='sigma')
+
+                # shared layer for critic
+                lc = tf.layers.dense(self.s, 200, tf.nn.relu, name='share', reuse=True)
+
+            with tf.variable_scope('critic'):
+                lc = tf.layers.dense(lc, 20, tf.nn.relu, kernel_initializer=w_init, name='lc2')
+                v = tf.layers.dense(lc, 1, kernel_initializer=w_init, name='v')  # state value
+        else:
+            with tf.variable_scope('actor'):
+                l_a = tf.layers.dense(self.s, 200, tf.nn.relu, kernel_initializer=w_init, name='la')
+                l_a = tf.layers.dense(l_a, 20, tf.nn.tanh, kernel_initializer=w_init, name='la2')
+                mu = tf.layers.dense(l_a, n_a, tf.nn.tanh, kernel_initializer=w_init, name='mu')
+                sigma = tf.layers.dense(l_a, n_a, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
+            with tf.variable_scope('critic'):
+                l_c = tf.layers.dense(self.s, 200, tf.nn.relu, kernel_initializer=w_init, name='lc1')
+                l_c = tf.layers.dense(l_c, 20, tf.nn.relu, kernel_initializer=w_init, name='lc2')
+                v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
+        return mu, sigma, v
+
+    def update_global(self, feed_dict):  # run by a local
+        _, _, test = self.sess.run([self.update_a_op, self.update_c_op, self.test], feed_dict)  # local grads applies to global net
+        return test
+
+    def pull_global(self):  # run by a local
+        self.sess.run([self.pull_a_params_op, self.pull_c_params_op])
+
+    def choose_action(self, s):  # run by a local
+        s = s[np.newaxis, :]
+        a = self.sess.run(self.A, {self.s: s})
+        return a.flatten()
+
+
+class Worker(object):
+    def __init__(self, env, i, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params, g_c_params):
+        self.env = env
+        self.id = i
+        self.sess = sess
+        self.name = 'W_%i' % i
+        self.AC = ACNet(self.name, n_s, n_a, a_bound, sess, opt_a, opt_c, g_a_params, g_c_params)
+
+    def work(self, update_iter, gamma, coord):
+        total_step = 1
+        buffer_s, buffer_a, buffer_r = [], [], []
+        while not coord.should_stop() and GLOBAL_EP.eval(self.sess) < MAX_GLOBAL_EP:
+            s = self.env.reset()
+            ep_r = 0
+            while True:
+                # if self.name == 'W_0' and total_step % 50 == 0:
+                #     self.env.render()
+                a = self.AC.choose_action(s)
+                s_, r, done, info = self.env.step(a)
+                if r == -100: r = -2
+                ep_r += r
+                buffer_s.append(s)
+                buffer_a.append(a)
+                buffer_r.append(r)
+
+                if total_step % update_iter == 0 or done:   # update global and assign to local net
+                    if done:
+                        v_s_ = 0   # terminal
+                    else:
+                        v_s_ = self.sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
+                    buffer_v_target = []
+                    for r in buffer_r[::-1]:    # reverse buffer r
+                        v_s_ = r + gamma * v_s_
+                        buffer_v_target.append(v_s_)
+                    buffer_v_target.reverse()
+
+                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
+                    feed_dict = {
+                        self.AC.s: buffer_s,
+                        self.AC.a_his: buffer_a,
+                        self.AC.v_target: buffer_v_target,
+                    }
+                    test = self.AC.update_global(feed_dict)
+                    buffer_s, buffer_a, buffer_r = [], [], []
+                    self.AC.pull_global()

+                s = s_
+                total_step += 1
+
+                if done:
+                    print(
+                        self.name,
+                        "Ep:", GLOBAL_EP.eval(self.sess),
+                        '| pos: %i' % self.env.unwrapped.hull.position[0],
+                        "| r: %.2f" % np.sum(ep_r),
+                        '| stddev:', test,
+                    )
+                    sess.run(COUNT_GLOBAL_EP)
+                    break
+
+if __name__ == "__main__":
+    sess = tf.Session()
+
+    with tf.device("/cpu:0"):
+        GLOBAL_EP = tf.Variable(0, dtype=tf.int32, name='global_ep', trainable=False)
+        COUNT_GLOBAL_EP = tf.assign(GLOBAL_EP, tf.add(GLOBAL_EP, tf.constant(1), name='step_ep'))
+        OPT_A = tf.train.RMSPropOptimizer(LR_A, .9)
+        OPT_C = tf.train.RMSPropOptimizer(LR_C, .9)
+        globalAC = ACNet(GLOBAL_NET_SCOPE, N_S, N_A)   # we only need its params
+        workers = []
+        # Create worker
+        for i in range(N_WORKERS):
+            workers.append(
+                Worker(
+                    gym.make(GAME), i, N_S, N_A, A_BOUND, sess,
+                    OPT_A, OPT_C, globalAC.a_params, globalAC.c_params
+                ))
+
+    coord = tf.train.Coordinator()
+    sess.run(tf.global_variables_initializer())
+
+    if OUTPUT_GRAPH:
+        if os.path.exists(LOG_DIR):
+            shutil.rmtree(LOG_DIR)
+        tf.summary.FileWriter(LOG_DIR, sess.graph)
+
+    worker_threads = []
+    for worker in workers:
+        job = lambda: worker.work(UPDATE_GLOBAL_ITER, GAMMA, coord)
+        t = threading.Thread(target=job)
+        t.start()
+        worker_threads.append(t)
+    coord.join(worker_threads)
+
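The SHARED_LAYER branch above leans on TensorFlow 1.x name-based variable sharing: the critic recreates the dense layer named 'share' with reuse=True, so both heads read the same weights. A minimal standalone sketch of that mechanism (assumes TensorFlow 1.x as pinned in the file's docstring; sizes and names here are illustrative, not the file's):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4], 'x')
with tf.variable_scope('actor'):
    h_a = tf.layers.dense(x, 8, tf.nn.relu, name='share')               # creates actor/share/kernel, actor/share/bias
    h_c = tf.layers.dense(x, 8, tf.nn.relu, name='share', reuse=True)   # reuses those same variables

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    a, c = sess.run([h_a, h_c], {x: np.random.randn(3, 4).astype(np.float32)})
    print(np.allclose(a, c))  # True: both branches go through the identical shared layer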

Reinforcement_learning_TUT/experiments/Solve_BipedalWalker/DDPG.py (+62, -59)
@@ -7,6 +7,44 @@
 np.random.seed(1)
 tf.set_random_seed(1)

+MAX_EPISODES = 2000
+LR_A = 0.0002  # learning rate for actor
+LR_C = 0.0002  # learning rate for critic
+GAMMA = 0.9999  # reward discount
+REPLACE_ITER_A = 1700
+REPLACE_ITER_C = 1500
+MEMORY_CAPACITY = 200000
+BATCH_SIZE = 32
+DISPLAY_THRESHOLD = 100  # display until the running reward > 100
+DATA_PATH = './data'
+LOAD_MODEL = False
+SAVE_MODEL_ITER = 50000
+RENDER = False
+OUTPUT_GRAPH = False
+ENV_NAME = 'BipedalWalker-v2'
+
+GLOBAL_STEP = tf.Variable(0, trainable=False)
+INCREASE_GS = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))
+LR_A = tf.train.exponential_decay(LR_A, GLOBAL_STEP, 10000, .97, staircase=True)
+LR_C = tf.train.exponential_decay(LR_C, GLOBAL_STEP, 10000, .97, staircase=True)
+END_POINT = (200 - 10) * (14/30)  # from game
+
+env = gym.make(ENV_NAME)
+env.seed(1)
+
+STATE_DIM = env.observation_space.shape[0]  # 24
+ACTION_DIM = env.action_space.shape[0]  # 4
+ACTION_BOUND = env.action_space.high  # [1, 1, 1, 1]
+
+# all placeholder for tf
+with tf.name_scope('S'):
+    S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')
+with tf.name_scope('A'):
+    A = tf.placeholder(tf.float32, shape=[None, ACTION_DIM], name='a')
+with tf.name_scope('R'):
+    R = tf.placeholder(tf.float32, [None, 1], name='r')
+with tf.name_scope('S_'):
+    S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')

 ############################### Actor ####################################

@@ -31,14 +69,13 @@ def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter

     def _build_net(self, s, scope, trainable):
         with tf.variable_scope(scope):
-            init_w = tf.random_normal_initializer(0., 0.1)
-            init_b = tf.constant_initializer(0.1)
-            net = tf.layers.dense(s, 400, activation=tf.nn.relu,
-                                  kernel_initializer=init_w, bias_initializer=init_b, name='l1',
-                                  trainable=trainable)
-            net = tf.layers.dense(net, 20, activation=tf.nn.relu,
-                                  kernel_initializer=init_w, bias_initializer=init_b, name='l2',
-                                  trainable=trainable)
+            init_w = tf.random_normal_initializer(0., 0.01)
+            init_b = tf.constant_initializer(0.01)
+            net = tf.layers.dense(s, 500, activation=tf.nn.relu,
+                                  kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)
+            net = tf.layers.dense(net, 200, activation=tf.nn.relu,
+                                  kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
+
             with tf.variable_scope('a'):
                 actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
                                           bias_initializer=init_b, name='a', trainable=trainable)
@@ -107,19 +144,19 @@ def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_

     def _build_net(self, s, a, scope, trainable):
         with tf.variable_scope(scope):
-            init_w = tf.random_normal_initializer(0., 0.1)
-            init_b = tf.constant_initializer(0.1)
+            init_w = tf.random_normal_initializer(0., 0.01)
+            init_b = tf.constant_initializer(0.01)

             with tf.variable_scope('l1'):
-                n_l1 = 400
+                n_l1 = 700
+                # combine the action and states together in this way
                 w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
                 w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
                 b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
                 net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
             with tf.variable_scope('l2'):
                 net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
                                       bias_initializer=init_b, name='l2', trainable=trainable)
-
             with tf.variable_scope('q'):
                 q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable)   # Q(s,a)
         return q
@@ -217,7 +254,7 @@ class Memory(object):  # stored as ( s, a, r, s_ ) in SumTree
     epsilon = 0.001  # small amount to avoid zero priority
     alpha = 0.6  # [0~1] convert the importance of TD error to priority
     beta = 0.4  # importance-sampling, from initial value increasing to 1
-    beta_increment_per_sampling = 1e-4  # annealing the bias
+    beta_increment_per_sampling = 1e-5  # annealing the bias
     abs_err_upper = 1  # for stability refer to paper

     def __init__(self, capacity):
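The smaller beta_increment_per_sampling slows the importance-sampling annealing: beta climbs from 0.4 toward 1 by a fixed increment each time a batch is drawn. The method that applies it is outside this diff, so the sketch below shows the usual prioritized-replay schedule rather than quoting the repo's code:

beta = 0.4                             # initial importance-sampling exponent
beta_increment_per_sampling = 1e-5     # the new, slower annealing step

def anneal(beta, increment=beta_increment_per_sampling):
    return min(1.0, beta + increment)  # beta saturates at 1

for _ in range(3):
    beta = anneal(beta)
print(beta)  # 0.40003
# with 1e-4 the schedule saturated after about 6,000 samplings; 1e-5 stretches that to about 60,000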
@@ -268,48 +305,11 @@ def _get_priority(self, error):
         return np.power(clipped_error, self.alpha)


-MAX_EPISODES = 2000
-LR_A = 0.0001  # learning rate for actor
-LR_C = 0.0001  # learning rate for critic
-GAMMA = 0.999  # reward discount
-REPLACE_ITER_A = 1700
-REPLACE_ITER_C = 1500
-MEMORY_CAPACITY = 200000
-BATCH_SIZE = 32
-DISPLAY_THRESHOLD = 60
-DATA_PATH = './data'
-LOAD_MODEL = True
-SAVE_MODEL_ITER = 50000
-RENDER = False
-OUTPUT_GRAPH = False
-ENV_NAME = 'BipedalWalker-v2'
-
-GLOBAL_STEP = tf.Variable(0, trainable=False)
-INCREASE_GS = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))
-END_POINT = (200 - 10) * (14/30)  # from game
-
-env = gym.make(ENV_NAME)
-env.seed(1)
-
-state_dim = env.observation_space.shape[0]  # 24
-action_dim = env.action_space.shape[0]  # 4
-action_bound = env.action_space.high  # [1, 1, 1, 1]
-
-# all placeholder for tf
-with tf.name_scope('S'):
-    S = tf.placeholder(tf.float32, shape=[None, state_dim], name='s')
-with tf.name_scope('A'):
-    A = tf.placeholder(tf.float32, shape=[None, action_dim], name='a')
-with tf.name_scope('R'):
-    R = tf.placeholder(tf.float32, [None, 1], name='r')
-with tf.name_scope('S_'):
-    S_ = tf.placeholder(tf.float32, shape=[None, state_dim], name='s_')
-
 sess = tf.Session()

 # Create actor and critic.
-actor = Actor(sess, action_dim, action_bound, LR_A, REPLACE_ITER_A)
-critic = Critic(sess, state_dim, action_dim, LR_C, GAMMA, REPLACE_ITER_C, actor.a_)
+actor = Actor(sess, ACTION_DIM, ACTION_BOUND, LR_A, REPLACE_ITER_A)
+critic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a_)
 actor.add_grad_to_graph(critic.a_grads)

 M = Memory(MEMORY_CAPACITY)
@@ -328,7 +328,7 @@ def _get_priority(self, error):
     tf.summary.FileWriter('logs', graph=sess.graph)

 var = 3  # control exploration
-var_min = 0.008
+var_min = 0.001

 for i_episode in range(MAX_EPISODES):
     # s = (hull angle speed, angular velocity, horizontal speed, vertical speed, position of joints and joints angular speed, legs contact with ground, and 10 lidar rangefinder measurements.)
@@ -342,17 +342,19 @@ def _get_priority(self, error):
         s_, r, done, _ = env.step(a)   # r = total 300+ points up to the far end. If the robot falls, it gets -100.

         if r == -100: r = -2
+        ep_r += r
+
         transition = np.hstack((s, a, [r], s_))
         max_p = np.max(M.tree.tree[-M.tree.capacity:])
         M.store(max_p, transition)

         if GLOBAL_STEP.eval(sess) > MEMORY_CAPACITY/20:
-            var = max([var*0.99995, var_min])    # decay the action randomness
+            var = max([var*0.9999, var_min])    # decay the action randomness
             tree_idx, b_M, ISWeights = M.prio_sample(BATCH_SIZE)    # for critic update
-            b_s = b_M[:, :state_dim]
-            b_a = b_M[:, state_dim: state_dim + action_dim]
-            b_r = b_M[:, -state_dim - 1: -state_dim]
-            b_s_ = b_M[:, -state_dim:]
+            b_s = b_M[:, :STATE_DIM]
+            b_a = b_M[:, STATE_DIM: STATE_DIM + ACTION_DIM]
+            b_r = b_M[:, -STATE_DIM - 1: -STATE_DIM]
+            b_s_ = b_M[:, -STATE_DIM:]

             abs_td = critic.learn(b_s, b_a, b_r, b_s_, ISWeights)
             actor.learn(b_s, b_a)
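The renamed STATE_DIM/ACTION_DIM constants slice the sampled batch b_M back into its pieces; each row is the flat transition np.hstack((s, a, [r], s_)) stored earlier. A standalone numpy sketch of that layout and slicing, using dummy values (an illustration, not the repo's code):

import numpy as np

STATE_DIM, ACTION_DIM = 24, 4            # BipedalWalker-v2 sizes noted in the diff
s, a, r, s_ = np.zeros(STATE_DIM), np.ones(ACTION_DIM), -2.0, np.full(STATE_DIM, 3.0)

transition = np.hstack((s, a, [r], s_))  # one stored row, laid out as [s | a | r | s_]
b_M = transition[np.newaxis, :]          # a "batch" of one sampled row

b_s = b_M[:, :STATE_DIM]                          # first 24 columns  -> states
b_a = b_M[:, STATE_DIM: STATE_DIM + ACTION_DIM]   # next 4 columns    -> actions
b_r = b_M[:, -STATE_DIM - 1: -STATE_DIM]          # the column before s_ -> reward
b_s_ = b_M[:, -STATE_DIM:]                        # last 24 columns   -> next states

print(b_s.shape, b_a.shape, b_r.shape, b_s_.shape)   # (1, 24) (1, 4) (1, 1) (1, 24)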
@@ -379,9 +381,10 @@ def _get_priority(self, error):
                 '| Epi_r: %.2f' % ep_r,
                 '| Exploration: %.3f' % var,
                 '| Pos: %.i' % int(env.unwrapped.hull.position[0]),
+                '| LR_A: %.6f' % sess.run(LR_A),
+                '| LR_C: %.6f' % sess.run(LR_C),
             )
             break

         s = s_
-        ep_r += r
         sess.run(INCREASE_GS)
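The extra print fields report the decayed learning rates. With staircase=True, tf.train.exponential_decay multiplies the base rate by 0.97 once per 10000 global steps; the same schedule can be reproduced in plain numpy (a sketch, not the repo's code):

import numpy as np

def exponential_decay(lr0, global_step, decay_steps=10000, decay_rate=0.97, staircase=True):
    # numpy version of the schedule built by tf.train.exponential_decay above
    exponent = global_step / decay_steps
    if staircase:
        exponent = np.floor(exponent)   # step-wise drop once every decay_steps
    return lr0 * decay_rate ** exponent

for step in (0, 9999, 10000, 50000, 200000):
    print(step, exponential_decay(0.0002, step))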

tensorflowTUT/tf20_RNN2.2/full_code.py (+4, -4)
@@ -149,10 +149,10 @@ def _bias_variable(self, shape, name='biases'):
             feed_dict=feed_dict)

         # plotting
-        # plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[:TIME_STEPS], 'b--')
-        # plt.ylim((-1.2, 1.2))
-        # plt.draw()
-        # plt.pause(0.3)
+        plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[:TIME_STEPS], 'b--')
+        plt.ylim((-1.2, 1.2))
+        plt.draw()
+        plt.pause(0.3)

         if i % 20 == 0:
             print('cost: ', round(cost, 4))
