If we already have a great algorithm like Q-learning, why bother cooking up something called Actor-Critic? It turns out the Actor in Actor-Critic descends from Policy Gradients, which lets it pick suitable actions in continuous action spaces with ease, something that leaves Q-learning paralyzed. Then why not just use Policy Gradients directly? Because the Critic in Actor-Critic descends from Q-learning (or other value-based methods), which can update after every single step, whereas traditional Policy Gradients can only update once per episode, and that lowers learning efficiency.
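To make the "single-step update" point concrete, here is a tiny numeric sketch (the numbers and variable names are my own illustration, not part of the tutorial code below): the Critic's TD error for one transition doubles as the advantage signal that the Actor is trained on, so both networks can be updated after every single step instead of waiting for the episode to end.

import numpy as np

GAMMA = 0.9      # same discount as in the code below

# one single transition (s, a, r, s_) -- dummy numbers for illustration only
r = 1.0          # reward received on this step
v_s = 5.0        # Critic's current estimate V(s)
v_s_next = 6.0   # Critic's current estimate V(s_)

td_error = r + GAMMA * v_s_next - v_s        # "surprise" of this one step: 1.4
critic_loss = td_error ** 2                  # Critic minimizes the squared TD error: 1.96
prob_a = 0.4                                 # pi(a|s): probability the Actor gave to the taken action
actor_objective = np.log(prob_a) * td_error  # Actor maximizes log pi(a|s) * td_error: about -1.28

print(td_error, critic_loss, actor_objective)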
""" Actor-Critic using TD-error as the Advantage, Reinforcement Learning. The cart pole example. Policy is oscillated. View more on my tutorial page: https://morvanzhou.github.io/tutorials/ Using: tensorflow,gym """ import numpy as np import tensorflow as tf import gym np.random.seed(2) tf.set_random_seed(2) # reproducible # Superparameters OUTPUT_GRAPH = False MAX_EPISODE = 100 DISPLAY_REWARD_THRESHOLD = 200 # renders environment if total episode reward is greater then this threshold MAX_EP_STEPS = 1000 # maximum time step in one episode RENDER = False # rendering wastes time GAMMA = 0.9 # reward discount in TD error LR_A = 0.001 # learning rate for actor LR_C = 0.01 # learning rate for critic env = gym.make('CartPole-v0') env.seed(1) # reproducible env = env.unwrapped N_F = env.observation_space.shape[0] N_A = env.action_space.n class Actor(object): def __init__(self, sess, n_features, n_actions, lr=0.001): self.sess = sess #actor进行单词训练,输入的只需要是一个状态一个动作和一个奖励 self.s = tf.placeholder(tf.float32, [1, n_features], "state") self.a = tf.placeholder(tf.int32, None, "act") self.td_error = tf.placeholder(tf.float32, None, "td_error") # TD_error #Actor的神经网络结构和我们的Policy Gradient定义的是一样的,是一个双层的全链接神经网络 with tf.variable_scope('Actor'): l1 = tf.layers.dense( inputs=self.s, units=20, # number of hidden units activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='l1' ) self.acts_prob = tf.layers.dense( inputs=l1, units=n_actions, # output units activation=tf.nn.softmax, # get action probabilities kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='acts_prob' ) with tf.variable_scope('exp_v'): log_prob = tf.log(self.acts_prob[0, self.a]) self.exp_v = tf.reduce_mean(log_prob * self.td_error) # advantage (TD_error) guided loss with tf.variable_scope('train'): self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v) # minimize(-exp_v) = maximize(exp_v) #Actor的训练只需要将状态,动作以及时间差分值喂给网络就可以。 def learn(self, s, a, td): s = s[np.newaxis, :] feed_dict = {self.s: s, self.a: a, self.td_error: td} _, exp_v = self.sess.run([self.train_op, self.exp_v], feed_dict) return exp_v #选择动作和Policy Gradient一样,根据计算出的softmax值来选择动作 def choose_action(self, s): s = s[np.newaxis, :] probs = self.sess.run(self.acts_prob, {self.s: s}) # get probabilities for all actions return np.random.choice(np.arange(probs.shape[1]), p=probs.ravel()) # return a int #Critic要反馈给Actor一个时间差分值,来决定Actor选择动作的好坏,如果时间差分值大的话, # 说明当前Actor选择的这个动作的惊喜度较高,需要更多的出现来使得时间差分值减小。 class Critic(object): def __init__(self, sess, n_features, lr=0.01): self.sess = sess self.s = tf.placeholder(tf.float32, [1, n_features], "state") self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next") self.r = tf.placeholder(tf.float32, None, 'r') with tf.variable_scope('Critic'): l1 = tf.layers.dense( inputs=self.s, units=20, # number of hidden units activation=tf.nn.relu, # None # have to be linear to make sure the convergence of actor. # But linear approximator seems hardly learns the correct Q. 
kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='l1' ) self.v = tf.layers.dense( inputs=l1, units=1, # output units activation=None, kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='V' ) #Critic的损失定义为时间差分值的平方值 with tf.variable_scope('squared_TD_error'): self.td_error = self.r + GAMMA * self.v_ - self.v self.loss = tf.square(self.td_error) # TD_error = (r+gamma*V_next) - V_eval with tf.variable_scope('train'): self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss) #Critic的任务就是告诉Actor当前选择的动作好不好,所以我们只要训练得到TD并返回给Actor就好 def learn(self, s, r, s_): s, s_ = s[np.newaxis, :], s_[np.newaxis, :] v_ = self.sess.run(self.v, {self.s: s_}) td_error, _ = self.sess.run([self.td_error, self.train_op], {self.s: s, self.v_: v_, self.r: r}) return td_error sess = tf.Session() actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A) critic = Critic(sess, n_features=N_F, lr=LR_C) # we need a good teacher, so the teacher should learn faster than the actor sess.run(tf.global_variables_initializer()) if OUTPUT_GRAPH: tf.summary.FileWriter("logs/", sess.graph) for i_episode in range(MAX_EPISODE): s = env.reset() t = 0 track_r = [] while True: if RENDER: env.render() a = actor.choose_action(s) s_, r, done, info = env.step(a) if done: r = -20 track_r.append(r) td_error = critic.learn(s, r, s_) # gradient = grad[r + gamma * V(s_) - V(s)] actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error] s = s_ t += 1 if done or t >= MAX_EP_STEPS: ep_rs_sum = sum(track_r) if 'running_reward' not in globals(): running_reward = ep_rs_sum else: running_reward = running_reward * 0.95 + ep_rs_sum * 0.05 if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering print("episode:", i_episode, " reward:", int(running_reward)) break
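As a small follow-up (this part is my own addition, not in the original tutorial): once training finishes you can watch the learned policy by acting greedily, i.e. taking the argmax of the Actor's softmax output instead of sampling from it. The sketch below assumes the script above has just run, so env, sess, actor and MAX_EP_STEPS are still in scope.

# Evaluation sketch (my own addition): run one greedy episode with the trained Actor.
s = env.reset()
for t in range(MAX_EP_STEPS):
    env.render()
    probs = sess.run(actor.acts_prob, {actor.s: s[np.newaxis, :]})  # softmax over actions
    a = int(np.argmax(probs.ravel()))  # greedy action instead of np.random.choice
    s, r, done, info = env.step(a)
    if done:
        break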