A line-by-line walkthrough of the Actor-Critic deep reinforcement learning algorithm (TensorFlow version)

The Actor is based on Policy Gradients: it can handle continuous actions, but it has to play through a whole episode before it can update its policy, so learning is slow.
The Critic network inherits the Q-learning tradition and can still update step by step.
First, import the required packages; nothing special here.

import numpy as np
import tensorflow as tf
import gym
import matplotlib.pyplot as plt

np.random.seed(2)
tf.set_random_seed(2)  # reproducible

# Hyperparameters
OUTPUT_GRAPH = False  # whether to write the TensorFlow graph to disk for TensorBoard
MAX_EPISODE = 5  # total number of training episodes
DISPLAY_REWARD_THRESHOLD = 200  # start rendering once the running reward exceeds this threshold
MAX_EP_STEPS = 500  # maximum number of steps per episode
RENDER = False  # rendering switch; toggles gym's animation output
GAMMA = 0.9  # reward discount factor
LR_A = 0.001  # Actor learning rate
LR_C = 0.01  # Critic learning rate

env = gym.make('CartPole-v0')
env.seed(1)
env = env.unwrapped

N_F = env.observation_space.shape[0]  # dimension of the state space
N_A = env.action_space.n  # number of discrete actions
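
As a quick sanity check (illustrative only, not from the original post): for CartPole-v0 the observation is a 4-dimensional vector and there are 2 discrete actions (push the cart left or right), so the two variables above come out as 4 and 2.

print(N_F, N_A)  # for CartPole-v0 this prints: 4 2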

The Actor network

class Actor(object):
    def __init__(self, sess, n_features, n_actions, lr=0.001):
        self.sess = sess

        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.a = tf.placeholder(tf.int32, None, "act")
        self.td_error = tf.placeholder(tf.float32, None, "td_error")  # TD_error

        with tf.variable_scope('Actor'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=20,  # number of hidden units
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='l1'
            )

            self.acts_prob = tf.layers.dense(
                inputs=l1,
                units=n_actions,  # output units
                activation=tf.nn.softmax,  # get action probabilities
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='acts_prob'
            )

        with tf.variable_scope('exp_v'):
            log_prob = tf.log(self.acts_prob[0, self.a])
            self.exp_v = tf.reduce_mean(log_prob * self.td_error)  # advantage (TD_error) guided loss

        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v)  # minimize(-exp_v) = maximize(exp_v)

    def learn(self, s, a, td):
        s = s[np.newaxis, :]
        feed_dict = {self.s: s, self.a: a, self.td_error: td}
        _, exp_v = self.sess.run([self.train_op, self.exp_v], feed_dict)
        return exp_v

    def choose_action(self, s):
        s = s[np.newaxis, :]
        probs = self.sess.run(self.acts_prob, {self.s: s}) 
        return np.random.choice(np.arange(probs.shape[1]), p=probs.ravel())  # return an int

The flow is as follows:
1. Two fully-connected layers: the first layer has 20 hidden units, and the second layer takes the output of l1 as its input.
2. The loss is still the Policy Gradients loss -log(probs) * vt. Here probs is the output of the second layer, i.e. the probabilities of all actions in the action space, and vt is the TD error td_error computed by the Critic.
3. Training step: feed in the state, the action, and the TD error.
4. Action selection: sample an action according to the probabilities in acts_prob (see the sketch below).
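
A minimal NumPy sketch (not part of the original code; the probability values are made up) of what choose_action does: it samples an action index according to the softmax output, so higher-probability actions are chosen more often while exploration stays stochastic.

import numpy as np

probs = np.array([[0.7, 0.3]])  # hypothetical acts_prob output for CartPole's two actions
action = np.random.choice(np.arange(probs.shape[1]), p=probs.ravel())
print(action)  # 0 with probability 0.7, 1 with probability 0.3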

The Critic network
The Critic network scores the action taken by the Actor and feeds td_error back to the Actor, which the Actor uses to update its parameters. It is likewise a two-layer network.
It also has three inputs, but they differ from the Actor's three, because here the action a is already fixed.
The inputs are: the current state, the current reward, and the estimated value of the next state (which gets discounted by GAMMA inside the TD target):

class Critic(object):
    def __init__(self, sess, n_features, lr=0.01):
        self.sess = sess

        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')

        with tf.variable_scope('Critic'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=20,  # number of hidden units
                activation=tf.nn.relu,  # None
                # have to be linear to make sure the convergence of actor.
                # But linear approximator seems hardly learns the correct Q.
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='l1'
            )

            self.v = tf.layers.dense(
                inputs=l1,
                units=1,  # output units
                activation=None,
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='V'
            )

        with tf.variable_scope('squared_TD_error'):
            self.td_error = self.r + GAMMA * self.v_ - self.v
            self.loss = tf.square(self.td_error)  # TD_error = (r+gamma*V_next) - V_eval
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)

    def learn(self, s, r, s_):
        s, s_ = s[np.newaxis, :], s_[np.newaxis, :]

        v_ = self.sess.run(self.v, {self.s: s_})
        td_error, _ = self.sess.run([self.td_error, self.train_op],
                                    {self.s: s, self.v_: v_, self.r: r})
        return td_error

The flow is as follows:
1. Two fully-connected layers: the first has 20 hidden units, and the second takes the output of l1 as input. Note that the output has a single unit: it is the estimated value V(s) of the current state, not a per-action value.
2. The loss is the squared TD error, where the TD target uses the value of the next state: TD_error = (r + GAMMA * V(s_next)) - V(s). See the numeric sketch below.
3. Learning step: first run a forward pass with the next state s_ to get v_ = V(s_next); then feed in the current state, the current reward, and v_, and backpropagate once with the Adam optimizer.
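
A tiny numeric sketch of the TD error the Critic minimizes (the reward and value estimates below are made-up illustration values, not from the post):

GAMMA = 0.9
r, v, v_next = 1.0, 2.5, 3.0       # hypothetical reward, V(s) and V(s_next)
td_error = r + GAMMA * v_next - v  # TD_error = (r + gamma*V_next) - V_eval
loss = td_error ** 2               # the Critic minimizes this squared TD error
print(td_error, loss)              # ~1.2 and ~1.44
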
Putting it all together: the training loop

sess = tf.Session()
actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)  # build the Actor
critic = Critic(sess, n_features=N_F, lr=LR_C)  # build the Critic
sess.run(tf.global_variables_initializer())  # initialize all network parameters

if OUTPUT_GRAPH:
    tf.summary.FileWriter("logs/", sess.graph)  # write the graph for TensorBoard

for i_episode in range(MAX_EPISODE):
    s = env.reset()  # reset the gym environment
    t = 0
    track_r = []  # all rewards of the current episode (reset each episode so ep_rs_sum is per-episode)

    while True:
        if RENDER: env.render()
        a = actor.choose_action(s)  # the Actor picks an action
        s_, r, done, info = env.step(a)  # environment feedback
        if done: r = -20  # penalty when the episode ends (the pole fell)

        track_r.append(r)  # record the reward r
        td_error = critic.learn(s, r, s_)  # Critic learns: td_error = r + GAMMA * V(s_) - V(s)
        actor.learn(s, a, td_error)  # Actor learns: the TD error guides the policy gradient
        s = s_
        t += 1

        if done or t >= MAX_EP_STEPS:
            # episode finished: print the exponentially smoothed running reward
            ep_rs_sum = sum(track_r)
            if 'running_reward' not in globals():
                running_reward = ep_rs_sum
            else:
                running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
            if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True  # rendering
            print("episode:", i_episode, "  reward:", int(running_reward))
            break
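
The matplotlib.pyplot import at the top is never actually used in the post. As a purely hypothetical sketch, you could collect each episode's return (e.g. append ep_rs_sum to a list such as ep_rewards inside the loop above) and plot the learning curve afterwards; the values below are made-up examples.

import matplotlib.pyplot as plt  # already imported at the top of the script

ep_rewards = [20, 35, 50, 80, 120]  # hypothetical per-episode returns collected during training
plt.plot(ep_rewards)
plt.xlabel('episode')
plt.ylabel('episode return')
plt.title('Actor-Critic on CartPole-v0')
plt.show()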
