Exploding Q values in a DQN

How to fix exploding Q values in a DQN

I am trying to follow the PyTorch tutorial on Q-learning. I kept the core of it but tried to implement things differently, and I have run into a problem: in the best case the algorithm does not seem to solve the environment, and in the worst case the predicted Q values explode. I cannot find the root cause.

I have tried tuning most of the hyperparameters and lowering the learningning rate is removed; corrected: I have tried tuning most of the hyperparameters and lowering the learning rate, and I have gone over the code several times looking for differences between my code and the one on the PyTorch website.

Does anyone know what could cause this behavior, or spot a bug in the code? I have checked most of the related posts on Stack Overflow; most of them are about the learning rate or the target network. My code does have a target network, and I have tried lowering the learning rate, but unfortunately that does not seem to fix it.
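
For reference, this is the kind of monitoring behind the 'sample of the state_action_values' print in the listing below; log_q_values is just an illustrative helper here, not something the training loop actually calls:

import torch

def log_q_values(policy_network, states, step):
    # Illustrative helper: print the range of predicted Q values for a
    # batch of states, to see when (and how fast) they start to blow up.
    with torch.no_grad():
        q_values = policy_network(states)
    print(f'step {step}: Q min {q_values.min().item():.2f}, '
          f'max {q_values.max().item():.2f}, mean {q_values.mean().item():.2f}')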

import gym
import torch
import random
import numpy as np
from collections import deque
from copy import deepcopy
import torch.nn as nn
import torch.optim as optim
from itertools import count
import torch.nn.functional as F
from collections import namedtuple

GAMMA = 0.99
EPSILON_END = 0.05
EPSILON_START = 0.9
EPSILON_DECAY = 500
LEARNING_RATE = 0.0001

ACTION_SPACE = 4
OBSERVATION_SPACE = 8

BATCH_SIZE = 32
MEMORY_CAPACITY = 10000
TARGET_NETWORK_UPDATE_FREQUENCY = 2

Experience = namedtuple('Experience',['state','action','reward','next_state'])


class Network(nn.Module):

    def __init__(self,input_features,output_features):
        super().__init__()
        self.fc_1 = nn.Linear(input_features,100)
        self.bn_1 = nn.LayerNorm(100)
        self.fc_2 = nn.Linear(100,100)
        self.bn_2 = nn.LayerNorm(100)
        self.fc_3 = nn.Linear(100,output_features)

    def forward(self,x):
        x = F.relu(self.bn_1(self.fc_1(x)))
        x = F.relu(self.bn_2(self.fc_2(x)))
        return self.fc_3(x)


class Memory(object):

    def __init__(self,capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self,experience):
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = experience
        self.position = (self.position + 1) % self.capacity

    def sample(self,batch_size):
        return random.sample(self.memory,batch_size)

    def __len__(self):
        return len(self.memory)


class Agent:

    def __init__(self,policy_network,action_space):
        self.policy_network = policy_network
        self.actions = list(range(action_space))

    def __call__(self,observation,step_id=None):
        if np.random.random() > get_epsilon(step_id):
            with torch.no_grad():
                predictions = self.policy_network(torch.tensor(observation)).detach().numpy()
                return np.argmax(predictions)
        else:
            return np.random.choice(self.actions)


class QLearning:

    def __init__(self,agent: Agent,target_network: nn.Module,optimizer,memory_capacity,batch_size):
        self.step = 0
        self.agent = agent
        self.target_network = target_network
        self.optimizer = optimizer
        self.batch_size = batch_size
        self.memory = Memory(memory_capacity)

    def train(self,num_episodes):
        env = gym.make('LunarLander-v2')
        recent_scores = deque(maxlen=100)
        for episode_id in range(num_episodes):
            observation = env.reset()
            score = 0
            for t in count():
                action = self.agent(observation,self.step)
                observation_,reward,done,info = env.step(action.item())

                observation_ = None if done else observation_
                experience = self.preprocess_experience(observation,action,reward,observation_)
                self.memory.push(experience)

                q_values = self.learn()
                self.step += 1
                score += reward

                if done is True:
                    break

            recent_scores.append(score)

            if episode_id % TARGET_NETWORK_UPDATE_FREQUENCY == 0:
                self.update_target_network()

            if episode_id % 10 == 0:
                print(episode_id,np.mean(recent_scores),t)  # TODO Tensorboard
                print('sample of the state_action_values :',list(q_values))

    def learn(self):
        if len(self.memory) < self.batch_size:
            return
        experiences = self.memory.sample(self.batch_size)
        batch = Experience(*zip(*experiences))

        states = torch.cat(batch.state)
        actions = torch.cat(batch.action)
        rewards = torch.cat(batch.reward)
        state_action_values = self.agent.policy_network(states).gather(1,actions)

        non_terminal_mask = torch.tensor(tuple(map(lambda s: s is not None,batch.next_state)),dtype=torch.bool)
        non_terminal_next_states = torch.cat([s for s in batch.next_state if s is not None])

        # Bootstrapped values from the target network; terminal states keep a value of 0
        next_state_values = torch.zeros(BATCH_SIZE)
        next_state_values[non_terminal_mask] = self.target_network(non_terminal_next_states).max(1)[0].detach()

        # TD target: r + gamma * max_a' Q_target(s', a')
        expected_state_action_values = (next_state_values * GAMMA) + rewards
        expected_state_action_values = expected_state_action_values.unsqueeze(1)

        loss = F.smooth_l1_loss(state_action_values,expected_state_action_values)
        self.optimizer.zero_grad()
        loss.backward()

        for param in self.agent.policy_network.parameters():
            param.grad.data.clamp_(-1,1)
        self.optimizer.step()

        return state_action_values.detach().tolist()[:3] # This is just to monitor if there is an issue with the values predicted

    def update_target_network(self):
        self.target_network.load_state_dict(self.agent.policy_network.state_dict())

    @staticmethod
    def preprocess_experience(state,action,reward,next_state):
        # Wrap the raw transition in tensors with a leading batch dimension
        state = torch.tensor(state).unsqueeze(0)
        next_state = torch.tensor(next_state).unsqueeze(0) if next_state is not None else None
        action = torch.tensor(action,dtype=torch.int64).view(1,1)
        reward = torch.tensor(reward,dtype=torch.float32).view(1)
        return Experience(state,action,reward,next_state)


def get_networks_and_optimizer(learning_rate,observation_space,action_space):
    policy_network = Network(observation_space,action_space)
    target_network = deepcopy(policy_network)
    target_network.eval()
    policy_network_optimizer = optim.RMSprop(policy_network.parameters(),learning_rate)
    return policy_network,target_network,policy_network_optimizer


def get_epsilon(step):
    if step is None:
        return EPSILON_END
    return EPSILON_END + (EPSILON_START - EPSILON_END) * np.exp(-1. * step / EPSILON_DECAY)


if __name__ == '__main__':
    policy_network,target_network,policy_network_optimizer = get_networks_and_optimizer(LEARNING_RATE,OBSERVATION_SPACE,ACTION_SPACE)
    agent = Agent(policy_network,ACTION_SPACE)
    trainer = QLearning(agent,target_network,policy_network_optimizer,MEMORY_CAPACITY,BATCH_SIZE)
    trainer.train(10000)
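
For comparison, a minimal sketch of a soft (Polyak) target-network update, a common alternative to the hard copy done in update_target_network above; the tau value is only an illustrative choice and is not part of my code:

def soft_update(target_network, policy_network, tau=0.005):
    # Blend the policy weights into the target weights:
    #   target <- tau * policy + (1 - tau) * target
    target_state = target_network.state_dict()
    policy_state = policy_network.state_dict()
    for key in policy_state:
        target_state[key] = tau * policy_state[key] + (1.0 - tau) * target_state[key]
    target_network.load_state_dict(target_state)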
