How to implement SARSA starting from a Q-Learning implementation for the Frozen Lake game
I am solving the Frozen Lake game with the Q-Learning and SARSA algorithms. I have a working code implementation of the Q-Learning algorithm, taken from Chapter 5 of Maxim Lapan's "Deep Reinforcement Learning Hands-On". I am trying to change this code to implement SARSA instead of Q-Learning, but I am not sure how. I have studied both algorithms, but I am confused about how they translate into code. What changes do I have to make to this code to implement SARSA?
#!/usr/bin/env python3
# Code pulled from Max Lapan's textbook
import gym
import collections
from torch.utils.tensorboard import SummaryWriter
ENV_NAME = "FrozenLake-v0"
GAMMA = 0.9
TEST_EPISODES = 150
class Agent:
    def __init__(self):
        self.env = gym.make(ENV_NAME)
        self.state = self.env.reset()
        self.rewards = collections.defaultdict(float)
        self.transits = collections.defaultdict(collections.Counter)
        self.values = collections.defaultdict(float)

    def play_n_random_steps(self, count):
        for _ in range(count):
            action = self.env.action_space.sample()
            new_state, reward, is_done, _ = self.env.step(action)
            self.rewards[(self.state, action, new_state)] = reward
            self.transits[(self.state, action)][new_state] += 1
            self.state = self.env.reset() if is_done else new_state

    def select_action(self, state):
        best_action, best_value = None, None
        for action in range(self.env.action_space.n):
            action_value = self.values[(state, action)]
            if best_value is None or best_value < action_value:
                best_value = action_value
                best_action = action
        return best_action
    def play_episode(self, env):
        total_reward = 0.0
        state = env.reset()
        while True:
            action = self.select_action(state)
            new_state, reward, is_done, _ = env.step(action)
            self.rewards[(state, action, new_state)] = reward
            self.transits[(state, action)][new_state] += 1
            total_reward += reward
            if is_done:
                break
            state = new_state
        return total_reward
    def value_iteration(self):
        for state in range(self.env.observation_space.n):
            for action in range(self.env.action_space.n):
                action_value = 0.0
                target_counts = self.transits[(state, action)]
                total = sum(target_counts.values())
                for tgt_state, count in target_counts.items():
                    reward = self.rewards[(state, action, tgt_state)]
                    best_action = self.select_action(tgt_state)
                    action_value += (count / total) * (
                        reward + GAMMA * self.values[(tgt_state, best_action)])
                self.values[(state, action)] = action_value
if __name__ == "__main__":
    test_env = gym.make(ENV_NAME)
    agent = Agent()
    writer = SummaryWriter(comment="-q-iteration")

    iter_no = 0
    best_reward = 0.0
    while True:
        iter_no += 1
        agent.play_n_random_steps(100)
        agent.value_iteration()

        reward = 0.0
        for _ in range(TEST_EPISODES):
            reward += agent.play_episode(test_env)
        reward /= TEST_EPISODES
        writer.add_scalar("reward", reward, iter_no)
        if reward > best_reward:
            print("Best reward updated %.3f -> %.3f" % (best_reward, reward))
            best_reward = reward
        if reward > 0.80:
            print("Solved in %d iterations!" % iter_no)
            break
    writer.close()
Solution
I don't know whether it will help, but I once developed a program that compares the performance of two agents on a Gridworld game. One of the agents uses Q-learning and the other uses SARSA.
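As for the conversion itself, the key difference between the two algorithms is the update target. Q-learning is off-policy: it bootstraps from the best next action, Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)). SARSA is on-policy: it bootstraps from the next action a' that the behavior policy actually takes, Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a)). One thing worth noting about the code you posted: it is the model-based Q-iteration agent from Chapter 5 of Lapan's book, which records transition counts and rewards and then sweeps every state in value_iteration(). SARSA is a model-free temporal-difference method, so rather than editing value_iteration() you would drop the model tables and apply the update above after every environment step.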
You will find the code file here: https://github.com/Elpazzu/AI-models/blob/master/Reinforcement-Learning/Gridworld
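If a self-contained example helps, here is a minimal tabular SARSA sketch for FrozenLake in the same style as the code above. It is only an illustration under my own assumptions: the ALPHA and EPSILON values, the episode count, and the epsilon_greedy helper are choices I made for this sketch, not anything from Lapan's book.

#!/usr/bin/env python3
# Minimal tabular SARSA sketch for FrozenLake -- an illustration, not
# Lapan's code. ALPHA, EPSILON, and epsilon_greedy are assumptions.
import random
import collections
import gym

ENV_NAME = "FrozenLake-v0"
GAMMA = 0.9     # discount factor, same as the code above
ALPHA = 0.2     # learning rate (assumed)
EPSILON = 0.1   # exploration rate (assumed)

env = gym.make(ENV_NAME)
values = collections.defaultdict(float)  # the Q(s, a) table

def epsilon_greedy(state):
    # Behavior policy: explore with probability EPSILON, act greedily otherwise.
    if random.random() < EPSILON:
        return env.action_space.sample()
    return max(range(env.action_space.n), key=lambda a: values[(state, a)])

for _ in range(10000):
    state = env.reset()
    action = epsilon_greedy(state)               # choose a in s
    while True:
        new_state, reward, is_done, _ = env.step(action)
        new_action = epsilon_greedy(new_state)   # choose a' in s' (on-policy)
        if is_done:
            target = reward                      # no bootstrap past terminal states
        else:
            # SARSA bootstraps from Q(s', a') for the action actually chosen,
            # where Q-learning would use the max over all actions in s'.
            target = reward + GAMMA * values[(new_state, new_action)]
        values[(state, action)] += ALPHA * (target - values[(state, action)])
        if is_done:
            break
        state, action = new_state, new_action

Because the target uses the epsilon-greedy action actually chosen in new_state, SARSA evaluates the very policy it is following. To measure performance afterwards, you would play test episodes with the greedy policy, much like play_episode() above but without the update bookkeeping.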