运行时错误:输入的所有元素都应该在 0 和 1 之间

如何解决运行时错误:输入的所有元素都应该在 0 和 1 之间

我想在蛋白质嵌入上使用 pytorch 使用带有 bilstm 层的 RNN。它适用于线性层,但是当我使用 Bilstm 时出现运行时错误。抱歉,如果不清楚这是我的第一篇文章,如果有人能帮助我,我将不胜感激。

from collections import Counter,OrderedDict
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F  # noqa
from deepchain import log
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from torch import Tensor,nn
num_layers=2
hidden_size=256
from torch.utils.data import DataLoader,TensorDataset

def classification_dataloader_from_numpy(
    x: np.ndarray,y: np.array,batch_size: int = 32
) -> DataLoader:
    """Build a dataloader from numpy for classification problem

    This dataloader is use only for classification. It detects automatically the class of
    the problem (binary or multiclass classification)
    Args:
        x (np.ndarray): [description]
        y (np.array): [description]
        batch_size (int,optional): [description]. Defaults to None.

    Returns:
        DataLoader: [description]
    """
    n_class: int = len(np.unique(y))
    if n_class > 2:
        log.info("This is a classification problem with %s classes",n_class)
    else:
        log.info("This is a binary classification problem")

    # y is float for binary classification,int for multiclass
    y_tensor = torch.tensor(y).long() if len(np.unique(y)) > 2 else torch.tensor(y).float()
    tensor_set = TensorDataset(torch.tensor(x).float(),y_tensor)
    loader = DataLoader(tensor_set,batch_size=batch_size)
    return loader
class RNN(pl.LightningModule):
     
    """A `pytorch` based deep learning model"""
    def __init__(self,input_shape: int,n_class: int,num_layers,n_neurons: int = 128,lr: float = 1e-3):
        super(RNN,self).__init__()
        self.lr = lr
        self.n_neurons=n_neurons
        self.num_layers=num_layers
        self.input_shape = input_shape
        self.output_shape = 1 if n_class <= 2 else n_class
        self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
        self.lstm = nn.LSTM(self.input_shape,self.n_neurons,batch_first=True,bidirectional=True)
        self.fc= nn.Linear(self.n_neurons,self.output_shape)
    def forward(self,x):
        h0=torch.zeros(self.num_layers,x_size(0),self.n_neurons).to(device)
        c0=torch.zeros(self.num_layers,self.n_neurons).to(device)
        out,_=self.lstm(x,(h0,c0))
        out=self.fc(out[:,-1,:])
        return self.fc(x)

    def training_step(self,batch,batch_idx):
        """training_step defined the train loop. It is independent of forward"""
        x,y = batch
        y_hat = self.fc(x).squeeze()
        y = y.squeeze()
        if self.output_shape > 1:
            y_hat = torch.log(y_hat)
        loss = self.loss(y_hat,y)
        self.log("train_loss",loss,on_epoch=True,on_step=False)
        return {"loss": loss}
    def validation_step(self,y)
        self.log("val_loss",on_step=False)
        return {"val_loss": loss}
    def configure_optimizers(self):
        """(Optional) Configure training optimizers."""
        return torch.optim.Adam(self.parameters(),lr=self.lr)
    def compute_class_weight(self,n_class: int):
        """Compute class weight for binary/multiple classification
        If n_class=2,only compute weights for the positve class.
        If n>2,compute for all classes.
        Args:
            y ([np.array]):vector of int represented the class
            n_class (int) : number fo class to use
        """
        if n_class == 2:
            class_count: typing.Counter = Counter(y)
            cond_binary = (0 in class_count) and (1 in class_count)
            assert cond_binary,"Must have O and 1 class for binary classification"
            weight = class_count[0] / class_count[1]
        else:
            weight = compute_class_weight(class_weight="balanced",classes=np.unique(y),y=y)
        return torch.tensor(weight).float()
    def fit(
        self,x: np.ndarray,epochs: int = 10,batch_size: int = 32,class_weight: Optional[str] = None,validation_data: bool = True,**kwargs
    ):
        assert isinstance(x,np.ndarray),"X should be a numpy array"
        assert isinstance(y,"y should be a numpy array"
        assert class_weight in (
            None,"balanced",),"the only choice available for class_weight is 'balanced'"
        n_class = len(np.unique(y))
        weight = None
        self.input_shape = x.shape[1]
        self.output_shape = 1 if n_class <= 2 else n_class
        self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
        if class_weight == "balanced":
            weight = self.compute_class_weight(y,n_class)
        self.loss = nn.NLLLoss(weight) if self.output_shape > 1 else nn.BCELoss(weight)
        if validation_data:
            x_train,x_val,y_train,y_val = train_test_split(x,y,test_size=0.2)
            train_loader = classification_dataloader_from_numpy(
                x_train,batch_size=batch_size
            )
            val_loader = classification_dataloader_from_numpy(x_val,y_val,batch_size=batch_size)
        else:
            train_loader = classification_dataloader_from_numpy(x,batch_size=batch_size)
            val_loader = None
        self.trainer = pl.Trainer(max_epochs=epochs,**kwargs)
        self.trainer.fit(self,train_loader,val_loader)
    def predict(self,x):
        """Run inference on data."""
        if self.output_shape is None:
            log.warning("Model is not fitted. Can't do predict")
            return
        return self.forward(x).detach().numpy()
    def save(self,path: str):
        """Save the state dict model with torch"""
        torch.save(self.fc.state_dict(),path)
        log.info("Save state_dict parameters in model.pt")
    def load_state_dict(self,state_dict: "OrderedDict[str,Tensor]",strict: bool = False):
        """Load state_dict saved parameters
        Args:
            state_dict (OrderedDict[str,Tensor]): state_dict tensor
            strict (bool,optional): [description]. Defaults to False.
        """
        self.fc.load_state_dict(state_dict,strict=strict)
        self.fc.eval()

    mlp = RNN(input_shape=1024,n_neurons=1024,num_layers=2,n_class=2)
mlp.fit(embeddings_train,np.array(y_train),validation_data=(embeddings_test,np.array(y_test)),epochs=30)
mlp.save("model.pt")

这些是发生的错误。我真的需要帮助,我会随时为您提供更多信息。

错误 1

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-154-e5fde11a675c> in <module>
      1 # init MLP model,train it on the data,then save model
      2 mlp = RNN(input_shape=1024,n_class=2)
----> 3 mlp.fit(embeddings_train,epochs=30)
      4 mlp.save("model.pt")

<ipython-input-153-a8d51af53bb5> in fit(self,x,epochs,batch_size,class_weight,validation_data,**kwargs)
    134             val_loader = None
    135         self.trainer = pl.Trainer(max_epochs=epochs,**kwargs)
--> 136         self.trainer.fit(self,val_loader)
    137     def predict(self,x):
    138         """Run inference on data."""

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self,model,train_dataloader,val_dataloaders,datamodule)
    456         )
    457 
--> 458         self._run(model)
    459 
    460         assert self.state.stopped

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in _run(self,model)
    754 
    755         # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 756         self.dispatch()
    757 
    758         # plugin will finalized fitting (e.g. ddp_spawn will load trained model)

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in dispatch(self)
    795             self.accelerator.start_predicting(self)
    796         else:
--> 797             self.accelerator.start_training(self)
    798 
    799     def run_stage(self):

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in start_training(self,trainer)
     94 
     95     def start_training(self,trainer: 'pl.Trainer') -> None:
---> 96         self.training_type_plugin.start_training(trainer)
     97 
     98     def start_evaluating(self,trainer: 'pl.Trainer') -> None:

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self,trainer)
    142     def start_training(self,trainer: 'pl.Trainer') -> None:
    143         # double dispatch to initiate the training loop
--> 144         self._results = trainer.run_stage()
    145 
    146     def start_evaluating(self,trainer: 'pl.Trainer') -> None:

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
    805         if self.predicting:
    806             return self.run_predict()
--> 807         return self.run_train()
    808 
    809     def _pre_training_routine(self):

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_train(self)
    840             self.progress_bar_callback.disable()
    841 
--> 842         self.run_sanity_check(self.lightning_module)
    843 
    844         self.checkpoint_connector.has_trained = False

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self,ref_model)
   1105 
   1106             # run eval step
-> 1107             self.run_evaluation()
   1108 
   1109             self.on_sanity_check_end()

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self,on_epoch)
    960                 # lightning module methods
    961                 with self.profiler.profile("evaluation_step_and_end"):
--> 962                     output = self.evaluation_loop.evaluation_step(batch,batch_idx,dataloader_idx)
    963                     output = self.evaluation_loop.evaluation_step_end(output)
    964 

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self,dataloader_idx)
    172             model_ref._current_fx_name = "validation_step"
    173             with self.trainer.profiler.profile("validation_step"):
--> 174                 output = self.trainer.accelerator.validation_step(args)
    175 
    176         # capture any logged information

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self,args)
    224 
    225         with self.precision_plugin.val_step_context(),self.training_type_plugin.val_step_context():
--> 226             return self.training_type_plugin.validation_step(*args)
    227 
    228     def test_step(self,args: List[Union[Any,int]]) -> Optional[STEP_OUTPUT]:

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self,*args,**kwargs)
    159 
    160     def validation_step(self,**kwargs):
--> 161         return self.lightning_module.validation_step(*args,**kwargs)
    162 
    163     def test_step(self,**kwargs):

<ipython-input-153-a8d51af53bb5> in validation_step(self,batch_idx)
     78         if self.output_shape > 1:
     79             y_hat = torch.log(y_hat)
---> 80         loss = self.loss(y_hat,y)
     81         self.log("val_loss",on_step=False)
     82         return {"val_loss": loss}

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self,*input,**kwargs)
    887             result = self._slow_forward(*input,**kwargs)
    888         else:
--> 889             result = self.forward(*input,**kwargs)
    890         for hook in itertools.chain(
    891                 _global_forward_hooks.values(),/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/loss.py in forward(self,input,target)
    611     def forward(self,input: Tensor,target: Tensor) -> Tensor:
    612         assert self.weight is None or isinstance(self.weight,Tensor)
--> 613         return F.binary_cross_entropy(input,target,weight=self.weight,reduction=self.reduction)
    614 
    615 

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/functional.py in binary_cross_entropy(input,weight,size_average,reduce,reduction)
   2760         weight = weight.expand(new_size)
   2761 
-> 2762     return torch._C._nn.binary_cross_entropy(input,reduction_enum)
   2763 
   2764 

RuntimeError: all elements of input should be between 0 and 1

错误 2


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-139-b7e8b13763ef> in <module>
      1 # Model evaluation
----> 2 y_pred = mlp(embeddings_val).squeeze().detach().numpy()
      3 model_evaluation_accuracy(np.array(y_val),y_pred)

/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self,<ipython-input-136-e2fc535640ab> in forward(self,x)
     55         self.fc= nn.Linear(self.hidden_size,self.output_shape)
     56     def forward(self,x):
---> 57         h0=torch.zeros(self.num_layers,self.hidden_size).to(device)
     58         c0=torch.zeros(self.num_layers,self.hidden_size).to(device)
     59         out,c0))

NameError: name 'x_size' is not defined

解决方法

我将其添加为答案,因为评论太难了。

您遇到的主要问题是 BCE 损失。 IIRC BCE 损失期望 p(y=1),因此您的输出应该在 0 和 1 之间。如果您想使用 logits(在数值上也更稳定),您应该使用 BinaryCrossEntropyWithLogits

正如您在其中一条评论中提到的,您正在使用 sigmoid 激活函数,但我觉得您的前向函数有些不对劲。主要是你的转发功能的最后一行是

        return self.fc(x)

这不使用 sigmoid 激活。此外,您仅使用输入 x 来生成输出。 LSTM 输出刚刚被丢弃?我认为,最好添加一些打印语句或断点以确保中间输出符合您的预期。

版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。

相关推荐


使用本地python环境可以成功执行 import pandas as pd import matplotlib.pyplot as plt # 设置字体 plt.rcParams[&#39;font.sans-serif&#39;] = [&#39;SimHei&#39;] # 能正确显示负号 p
错误1:Request method ‘DELETE‘ not supported 错误还原:controller层有一个接口,访问该接口时报错:Request method ‘DELETE‘ not supported 错误原因:没有接收到前端传入的参数,修改为如下 参考 错误2:cannot r
错误1:启动docker镜像时报错:Error response from daemon: driver failed programming external connectivity on endpoint quirky_allen 解决方法:重启docker -&gt; systemctl r
错误1:private field ‘xxx‘ is never assigned 按Altʾnter快捷键,选择第2项 参考:https://blog.csdn.net/shi_hong_fei_hei/article/details/88814070 错误2:启动时报错,不能找到主启动类 #
报错如下,通过源不能下载,最后警告pip需升级版本 Requirement already satisfied: pip in c:\users\ychen\appdata\local\programs\python\python310\lib\site-packages (22.0.4) Coll
错误1:maven打包报错 错误还原:使用maven打包项目时报错如下 [ERROR] Failed to execute goal org.apache.maven.plugins:maven-resources-plugin:3.2.0:resources (default-resources)
错误1:服务调用时报错 服务消费者模块assess通过openFeign调用服务提供者模块hires 如下为服务提供者模块hires的控制层接口 @RestController @RequestMapping(&quot;/hires&quot;) public class FeignControl
错误1:运行项目后报如下错误 解决方案 报错2:Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.8.1:compile (default-compile) on project sb 解决方案:在pom.
参考 错误原因 过滤器或拦截器在生效时,redisTemplate还没有注入 解决方案:在注入容器时就生效 @Component //项目运行时就注入Spring容器 public class RedisBean { @Resource private RedisTemplate&lt;String
使用vite构建项目报错 C:\Users\ychen\work&gt;npm init @vitejs/app @vitejs/create-app is deprecated, use npm init vite instead C:\Users\ychen\AppData\Local\npm-
参考1 参考2 解决方案 # 点击安装源 协议选择 http:// 路径填写 mirrors.aliyun.com/centos/8.3.2011/BaseOS/x86_64/os URL类型 软件库URL 其他路径 # 版本 7 mirrors.aliyun.com/centos/7/os/x86
报错1 [root@slave1 data_mocker]# kafka-console-consumer.sh --bootstrap-server slave1:9092 --topic topic_db [2023-12-19 18:31:12,770] WARN [Consumer clie
错误1 # 重写数据 hive (edu)&gt; insert overwrite table dwd_trade_cart_add_inc &gt; select data.id, &gt; data.user_id, &gt; data.course_id, &gt; date_format(
错误1 hive (edu)&gt; insert into huanhuan values(1,&#39;haoge&#39;); Query ID = root_20240110071417_fe1517ad-3607-41f4-bdcf-d00b98ac443e Total jobs = 1
报错1:执行到如下就不执行了,没有显示Successfully registered new MBean. [root@slave1 bin]# /usr/local/software/flume-1.9.0/bin/flume-ng agent -n a1 -c /usr/local/softwa
虚拟及没有启动任何服务器查看jps会显示jps,如果没有显示任何东西 [root@slave2 ~]# jps 9647 Jps 解决方案 # 进入/tmp查看 [root@slave1 dfs]# cd /tmp [root@slave1 tmp]# ll 总用量 48 drwxr-xr-x. 2
报错1 hive&gt; show databases; OK Failed with exception java.io.IOException:java.lang.RuntimeException: Error in configuring object Time taken: 0.474 se
报错1 [root@localhost ~]# vim -bash: vim: 未找到命令 安装vim yum -y install vim* # 查看是否安装成功 [root@hadoop01 hadoop]# rpm -qa |grep vim vim-X11-7.4.629-8.el7_9.x
修改hadoop配置 vi /usr/local/software/hadoop-2.9.2/etc/hadoop/yarn-site.xml # 添加如下 &lt;configuration&gt; &lt;property&gt; &lt;name&gt;yarn.nodemanager.res