gym == 0.21.0
stable-baselines3 == 1.6.2
Environment introduction: Mountain Car
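In MountainCar-v0, an underpowered car starts in a valley and has to build momentum to reach the flag at position 0.5 on the right-hand hill. The observation is a 2-dimensional vector [position, velocity], the action space is Discrete(3) (0 = push left, 1 = no push, 2 = push right), the reward is -1 per step, and episodes are truncated after 200 steps. A minimal sketch for inspecting the spaces (the values in the comments follow from the classic-control spec, not from a captured run):

import gym

env = gym.make("MountainCar-v0")
# Observation space: Box([-1.2, -0.07], [0.6, 0.07]) -> [position, velocity]
print(env.observation_space)
# Action space: Discrete(3) -> 0 = push left, 1 = no push, 2 = push right
print(env.action_space)
# Goal position on the right hill (0.5)
print(env.goal_position)
env.close()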
import gym

# Create environment
env = gym.make("MountainCar-v0")

episodes = 10
for episode in range(episodes):
    obs = env.reset()
    done = False
    rewards = 0
    while not done:
        # Sample a random action and step the environment
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        env.render()
        rewards += reward
    print(rewards)
env.close()
Environment test video: Mountain Car test
import gym
import numpy as np

env = gym.make("MountainCar-v0")

# Q-Learning settings
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 25000
SHOW_EVERY = 1000

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES // 2
epsilon_decay_value = epsilon / (END_EPSILON_DECAYING - START_EPSILON_DECAYING)

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE
print(discrete_os_win_size)

def get_discrete_state(state):
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # we use this tuple to look up the 3 Q values for the available actions in the q-table
    return tuple(discrete_state.astype(np.int64))

q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))

for episode in range(EPISODES):
    state = env.reset()
    discrete_state = get_discrete_state(state)
    if episode % SHOW_EVERY == 0:
        render = True
        print(episode)
    else:
        render = False
    done = False
    while not done:
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(q_table[discrete_state])
        else:
            # Get random action
            action = np.random.randint(0, env.action_space.n)
        new_state, reward, done, _ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)

        # If simulation did not end yet after last step - update Q table
        if not done:
            # Maximum possible Q value in next step (for new state)
            max_future_q = np.max(q_table[new_discrete_state])
            # Current Q value (for current state and performed action)
            current_q = q_table[discrete_state + (action,)]
            # And here's our equation for a new Q value for current state and action
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
            # Update Q table with new Q value
            q_table[discrete_state + (action,)] = new_q
        # Simulation ended (for any reason) - if goal position is achieved, update Q value with reward directly
        elif new_state[0] >= env.goal_position:
            # q_table[discrete_state + (action,)] = reward
            q_table[discrete_state + (action,)] = 0
            print("we made it on episode {}".format(episode))

        discrete_state = new_discrete_state
        if render:
            env.render()

    # Decaying is being done every episode if episode number is within decaying range
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value

np.save("q_table.npy", arr=q_table)
env.close()
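To make the discretization concrete: with DISCRETE_OS_SIZE = [20, 20], the Q table has 20 x 20 x 3 = 1200 entries, and each bin spans roughly 0.09 in position and 0.007 in velocity. A quick check of the mapping (the sample state below is an arbitrary illustration, and the printed values are approximate):

import gym
import numpy as np

env = gym.make("MountainCar-v0")
DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE
print(discrete_os_win_size)   # approximately [0.09, 0.007]
# Arbitrary example state: car left of the valley bottom, small positive velocity
sample_state = np.array([-0.5, 0.02])
indices = tuple(((sample_state - env.observation_space.low) / discrete_os_win_size).astype(np.int64))
print(indices)                # expected (7, 12): the row/column used to index the 20x20x3 Q table
env.close()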
Since this environment is relatively simple, the Q table produced by Q-learning is small and there is no dimensionality-explosion problem, so Q-learning can achieve accurate control. The test code that loads the generated Q table is as follows:
import gym
import numpy as np

env = gym.make("MountainCar-v0")

# Q-Learning settings
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 10

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

def get_discrete_state(state):
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # we use this tuple to look up the 3 Q values for the available actions in the q-table
    return tuple(discrete_state.astype(np.int64))

# Load the Q table saved by the training script
q_table = np.load(file="q_table.npy")

for episode in range(EPISODES):
    state = env.reset()
    discrete_state = get_discrete_state(state)
    rewards = 0
    done = False
    while not done:
        # Get greedy action from Q table
        action = np.argmax(q_table[discrete_state])
        new_state, reward, done, _ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)
        rewards += reward
        # If the episode ended at the goal position, report success
        if done and new_state[0] >= env.goal_position:
            print("we made it on episode {}, rewards {}".format(episode, rewards))
        discrete_state = new_discrete_state
        env.render()
env.close()
Q-learning test video result: Mountain Car Qlearning
As the video shows, the car reaches the goal point every time.
The DQN model uses stable-baselines3's default network architecture (two hidden layers of 64 units), a learning rate of 5e-4, and 1.5 million training timesteps. The training code is as follows:
import gym
from stable_baselines3 import DQN

# Create environment
env = gym.make("MountainCar-v0")

model = DQN(
    "MlpPolicy",
    env,
    verbose=1,
    learning_rate=5e-4)

# Train the agent and display a progress bar
model.learn(
    total_timesteps=int(1.5e6),
    progress_bar=True)

# Save the agent
model.save("DQN_MountainCar")
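For reference, DQN's MlpPolicy in stable-baselines3 defaults to two hidden layers of 64 units, so the (64, 64) architecture does not need to be specified above. If you want to make it explicit, or try a different size, it can be passed through policy_kwargs; the following is only a sketch of that option, not the training script used here:

import gym
from stable_baselines3 import DQN

env = gym.make("MountainCar-v0")
# Equivalent to the default MlpPolicy architecture: two hidden layers of 64 units
model = DQN(
    "MlpPolicy",
    env,
    verbose=1,
    learning_rate=5e-4,
    policy_kwargs=dict(net_arch=[64, 64]))
print(model.policy)  # prints the Q-network layers for inspection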
The model test code is as follows:
import gym
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy

# Create environment
env = gym.make("MountainCar-v0")

# Load model
model = DQN.load("DQN_MountainCar", env=env)

mean_reward, std_reward = evaluate_policy(
    model,
    model.get_env(),
    deterministic=True,
    render=True,
    n_eval_episodes=10)
print(mean_reward)
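evaluate_policy handles the rollout loop internally. If you prefer to step the environment yourself, for example to log per-step data, a manual loop with model.predict is a sketch of the same evaluation:

import gym
from stable_baselines3 import DQN

env = gym.make("MountainCar-v0")
model = DQN.load("DQN_MountainCar", env=env)
for episode in range(10):
    obs = env.reset()
    done = False
    rewards = 0
    while not done:
        # Greedy action from the trained Q-network
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        rewards += reward
        env.render()
    print("episode {}, rewards {}".format(episode, rewards))
env.close()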
Test result video: Mountain Car DQN
As the video shows, the car reaches the goal every time.
stable-baselines3: documentation
gym: documentation