
Commit 9c27c85

Morvan Zhou committed: Initial commit
0 parents · commit 9c27c85


50 files changed: +7178 -0 lines

README.md

+50
@@ -0,0 +1,50 @@
<p align="center">
    <a href="https://www.youtube.com/watch?v=pieI7rOXELI&list=PLXO45tsB95cIplu-fLMpUEEZTwrDNh6Ba" target="_blank">
    <img width="60%" src="https://github.com/MorvanZhou/tutorials/blob/master/Reinforcement_learning_TUT/RL_cover.jpg" style="max-width:100%;">
    </a>
</p>

---

<br>

# Reinforcement Learning Methods and Tutorials

These reinforcement learning tutorials cover everything from basic RL algorithms to advanced algorithms developed in recent years.

**For Chinese speakers, visit [莫烦 Python](https://morvanzhou.github.io/tutorials/) or my [Youtube channel](https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg) for more.**

**By popular request, English versions of these tutorials are available in this playlist:** ([https://www.youtube.com/playlist?list=PLXO45tsB95cIplu-fLMpUEEZTwrDNh6Ba](https://www.youtube.com/playlist?list=PLXO45tsB95cIplu-fLMpUEEZTwrDNh6Ba))

* [Simple entry example](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/1_command_line_reinforcement_learning)
* Tabular Methods (a minimal Q-learning update sketch follows this list)
  * [Q-learning](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/2_Q_Learning_maze)
  * [Sarsa](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/3_Sarsa_maze)
  * [Sarsa(lambda)](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/4_Sarsa_lambda_maze)
* Function Approximation (DQN)
  * [Deep Q Network](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/5_Deep_Q_Network)
  * [Using OpenAI Gym](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/6_OpenAI_gym)
* DQN-based methods
  * [Double DQN](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/5.1_Double_DQN)
  * [DQN with Prioritized Experience Replay](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/5.2_Prioritized_Replay_DQN)
  * [Dueling DQN](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/5.3_Dueling_DQN)
* [Policy Gradients](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/7_Policy_gradient_softmax)
* [Actor Critic](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/8_Actor_Critic_Advantage)
* [Deep Deterministic Policy Gradient](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/9_Deep_Deterministic_Policy_Gradient_DDPG)
* [A3C](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/10_A3C)
* Model-based RL (WIP)
  * [Dyna-Q](https://github.com/MorvanZhou/tutorials/tree/master/Reinforcement_learning_TUT/11_Dyna_Q)
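For readers new to the tabular methods listed above, here is a minimal, illustrative sketch of the Q-learning update rule that the Q-learning tutorial implements; the function name and table sizes below are made up for this example and are not part of the repository:

```python
import numpy as np

def q_learning_update(Q, s, a, r, s_next, done, alpha=0.1, gamma=0.9):
    """One tabular Q-learning step: move Q[s, a] toward the bootstrapped target."""
    target = r if done else r + gamma * np.max(Q[s_next])  # greedy bootstrap from the next state
    Q[s, a] += alpha * (target - Q[s, a])                   # temporal-difference update
    return Q

# Example: a 4-state, 2-action table updated with a single transition.
Q = np.zeros((4, 2))
Q = q_learning_update(Q, s=0, a=1, r=1.0, s_next=2, done=False)
```

The later tutorials replace the table with a neural network (DQN and its variants) and then move to policy-based and actor-critic methods.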
# Donation

*If these tutorials help you, please consider donating to support me in making better tutorials. Any contribution is greatly appreciated!*

<div>
  <a href="https://www.paypal.com/cgi-bin/webscr?cmd=_donations&amp;business=morvanzhou%40gmail%2ecom&amp;lc=C2&amp;item_name=MorvanPython&amp;currency_code=AUD&amp;bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted">
    <img style="border-radius: 20px; box-shadow: 0px 0px 10px 1px #888888;"
         src="https://www.paypalobjects.com/webstatic/en_US/i/btn/png/silver-pill-paypal-44px.png"
         alt="Paypal"
         height="auto"></a>
</div>

RL_cover.jpg

68.1 KB

contents/10_A3C/A3C_RNN.py

+230
@@ -0,0 +1,230 @@
"""
Asynchronous Advantage Actor Critic (A3C) + RNN with continuous action space, Reinforcement Learning.

The Pendulum example.

View more on [莫烦Python]: https://morvanzhou.github.io/tutorials/

Using:
tensorflow 1.0
gym 0.8.0
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt

GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 400
MAX_GLOBAL_EP = 800
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 5
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001    # learning rate for actor
LR_C = 0.001     # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0

env = gym.make(GAME)

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]


class ACNet(object):
    def __init__(self, scope, globalAC=None):

        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self._build_net()
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:   # local net, calculate losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                mu, sigma, self.v = self._build_net()

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))

                with tf.name_scope('wrap_a_out'):
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-4

                normal_dist = tf.contrib.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)

                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope('critic'):   # only the critic controls the rnn update
            cell_size = 32
            s = tf.expand_dims(self.s, axis=1, name='timely_input')  # [time_step, feature] => [time_step, batch, feature]
            rnn_cell = tf.contrib.rnn.BasicRNNCell(cell_size)
            self.init_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
            outputs, self.final_state = tf.nn.dynamic_rnn(
                cell=rnn_cell, inputs=s, initial_state=self.init_state, time_major=True)
            cell_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs')  # joined state representation
            l_c = tf.layers.dense(cell_out, 50, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value

        with tf.variable_scope('actor'):    # state representation is based on the critic
            cell_out = tf.stop_gradient(cell_out, name='c_cell_out')   # reuse the critic's representation; gradients do not flow back into it
            l_a = tf.layers.dense(cell_out, 80, tf.nn.relu6, kernel_initializer=w_init, name='la')
            mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local
        SESS.run([self.update_a_op, self.update_c_op], feed_dict)  # local grads applied to the global net

    def pull_global(self):  # run by a local
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s, cell_state):  # run by a local
        s = s[np.newaxis, :]
        a, cell_state = SESS.run([self.A, self.final_state], {self.s: s, self.init_state: cell_state})
        return a[0], cell_state


class Worker(object):
    def __init__(self, name, globalAC):
        self.env = gym.make(GAME).unwrapped
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            rnn_state = SESS.run(self.AC.init_state)    # zero rnn state at the beginning
            keep_state = rnn_state.copy()               # keep rnn state for updating the global net
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':
                    self.env.render()

                a, rnn_state_ = self.AC.choose_action(s, rnn_state)  # get the action and next rnn state
                s_, r, done, info = self.env.step(a)
                done = True if ep_t == MAX_EP_STEP - 1 else False
                r /= 10     # normalize reward

                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)

                if total_step % UPDATE_GLOBAL_ITER == 0 or done:    # update global and assign to local net
                    if done:
                        v_s_ = 0    # terminal
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :], self.AC.init_state: rnn_state_})[0, 0]
                    buffer_v_target = []
                    for r in buffer_r[::-1]:    # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()

                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)

                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                        self.AC.init_state: keep_state,
                    }

                    self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()
                    keep_state = rnn_state_.copy()  # replace keep_state with the new initial rnn state

                s = s_
                rnn_state = rnn_state_  # renew rnn state
                total_step += 1

                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                    )
                    GLOBAL_EP += 1
                    break


if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i   # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())

    if OUTPUT_GRAPH:
        if os.path.exists(LOG_DIR):
            shutil.rmtree(LOG_DIR)
        tf.summary.FileWriter(LOG_DIR, SESS.graph)

    worker_threads = []
    for worker in workers:
        # pass the bound method directly so each thread keeps its own worker
        # (a lambda would late-bind the loop variable)
        t = threading.Thread(target=worker.work)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)

    plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()
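The value targets fed to `v_target` above are n-step bootstrapped discounted returns, built by walking the reward buffer backwards with `v_s_ = r + GAMMA * v_s_`. A small standalone sketch of that computation (the function name and sample numbers are illustrative, not part of this commit):

```python
def discounted_targets(rewards, bootstrap_value, gamma=0.9):
    """Walk the rewards backwards, discounting the bootstrapped tail value."""
    targets = []
    v = bootstrap_value
    for r in reversed(rewards):
        v = r + gamma * v
        targets.append(v)
    targets.reverse()
    return targets

# rewards [0.1, 0.2, 0.3] with a bootstrapped final value of 1.0:
print(discounted_targets([0.1, 0.2, 0.3], 1.0))  # ~[1.252, 1.28, 1.2]
```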

0 commit comments
