如何解决运行时错误:3 维权重 [128, 1024, 1] 的预期 3 维输入,但得到大小为 [32, 1024] 的二维输入
当我使用卷积层运行分类器时,出现此错误。我试图重塑,但我认为我没有正确地做到这一点。它用于对预训练的蛋白质嵌入进行分类。有人能帮我解决这个形状和尺寸的问题吗,我将不胜感激?
import typing
from collections import Counter,OrderedDict
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F # noqa
from deepchain import log
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from torch import Tensor,nn
num_layers=2
hidden_size=256
from torch.utils.data import DataLoader,TensorDataset
def classification_dataloader_from_numpy(
x: np.ndarray,y: np.array,batch_size: int = 32
) -> DataLoader:
"""Build a dataloader from numpy for classification problem
This dataloader is use only for classification. It detects automatically the class of
the problem (binary or multiclass classification)
Args:
x (np.ndarray): [description]
y (np.array): [description]
batch_size (int,optional): [description]. Defaults to None.
Returns:
DataLoader: [description]
"""
n_class: int = len(np.unique(y))
if n_class > 2:
log.info("This is a classification problem with %s classes",n_class)
else:
log.info("This is a binary classification problem")
# y is float for binary classification,int for multiclass
y_tensor = torch.tensor(y).long() if len(np.unique(y)) > 2 else torch.tensor(y).float()
tensor_set = TensorDataset(torch.tensor(x).float(),y_tensor)
loader = DataLoader(tensor_set,batch_size=batch_size)
return loader
class MLP(pl.LightningModule):
"""A `pytorch` based deep learning model"""
def __init__(self,input_shape: int,n_class: int,n_neurons: int = 128,lr: float = 1e-3):
"""Instantiante a pytorch-lightning model
This model is a template MLP to learn directly from embeddings.
It's composed of 2 Dense layers with Relu activation. This template works only
for classification model.
The trainings logs are saved in the logs folder.
Args:
input_shape (int): shape input layer.
n_class (int): number of class to predict
- n_class=2 for (0/1) binary classification
- n_class>=2 for multiclass problem
n_neurons (int,optional): Number or neurons in each layer. Defaults to 128.
lr (float,optional): learning rate. Defaults to 1e-3.
"""
super().__init__()
self.n_neurons = n_neurons
self.lr = lr
self.input_shape = input_shape
self.output_shape = 1 if n_class <= 2 else n_class
self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
self.model = nn.Sequential(
nn.Conv1d(self.input_shape,self.n_neurons,kernel_size=1),nn.ReLU(),nn.Conv1d(self.n_neurons,64,kernel_size=11),self.activation,)
def forward(self,x):
if not isinstance(x,torch.Tensor):
x = x.reshape(x.shape[0],-1)
x = torch.tensor(x).float()
return self.model(x),out
def training_step(self,batch,batch_idx):
"""training_step defined the train loop. It is independent of forward"""
x,y = batch
y_hat = self.model(x).squeeze()
y = y.squeeze()
if self.output_shape > 1:
y_hat = torch.log(y_hat)
loss = self.loss(y_hat,y)
self.log("train_loss",loss,on_epoch=True,on_step=False)
return {"loss": loss}
def validation_step(self,y)
self.log("val_loss",on_step=False)
return {"val_loss": loss}
def configure_optimizers(self):
"""(Optional) Configure training optimizers."""
return torch.optim.Adam(self.parameters(),lr=self.lr)
def compute_class_weight(self,n_class: int):
"""Compute class weight for binary/multiple classification
If n_class=2,only compute weights for the positve class.
If n>2,compute for all classes.
Args:
y ([np.array]):vector of int represented the class
n_class (int) : number fo class to use
"""
if n_class == 2:
class_count: typing.Counter = Counter(y)
cond_binary = (0 in class_count) and (1 in class_count)
assert cond_binary,"Must have O and 1 class for binary classification"
weight = class_count[0] / class_count[1]
else:
weight = compute_class_weight(class_weight="balanced",classes=np.unique(y),y=y)
return torch.tensor(weight).float()
def fit(
self,x: np.ndarray,epochs: int = 10,batch_size: int = 32,class_weight: Optional[str] = None,validation_data: bool = True,**kwargs
):
assert isinstance(x,np.ndarray),"X should be a numpy array"
assert isinstance(y,"y should be a numpy array"
assert class_weight in (
None,"balanced",),"the only choice available for class_weight is 'balanced'"
n_class = len(np.unique(y))
weight = None
self.input_shape = x.shape[1]
self.output_shape = 1 if n_class <= 2 else n_class
self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
if class_weight == "balanced":
weight = self.compute_class_weight(y,n_class)
self.loss = nn.NLLLoss(weight) if self.output_shape > 1 else nn.BCELoss(weight)
if validation_data:
x_train,x_val,y_train,y_val = train_test_split(x,y,test_size=0.2)
train_loader = classification_dataloader_from_numpy(
x_train,batch_size=batch_size
)
val_loader = classification_dataloader_from_numpy(x_val,y_val,batch_size=batch_size)
else:
train_loader = classification_dataloader_from_numpy(x,batch_size=batch_size)
val_loader = None
self.trainer = pl.Trainer(max_epochs=epochs,**kwargs)
self.trainer.fit(self,train_loader,val_loader)
def predict(self,x):
"""Run inference on data."""
if self.output_shape is None:
log.warning("Model is not fitted. Can't do predict")
return
return self.forward(x).detach().numpy()
def save(self,path: str):
"""Save the state dict model with torch"""
torch.save(self.model.state_dict(),path)
log.info("Save state_dict parameters in model.pt")
def load_state_dict(self,state_dict: "OrderedDict[str,Tensor]",strict: bool = False):
"""Load state_dict saved parameters
Args:
state_dict (OrderedDict[str,Tensor]): state_dict tensor
strict (bool,optional): [description]. Defaults to False.
"""
self.model.load_state_dict(state_dict,strict=strict)
self.model.eval()
这是我所拥有的尺寸和形状的错误。
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-43-a90a860da852> in <module>
1 # init MLP model,train it on the data,then save model
2 mlp = MLP(input_shape=1024,n_class=2)
----> 3 mlp.fit(embeddings_train,np.array(y_train),validation_data=(embeddings_test,np.array(y_test)),epochs=300)
4 mlp.save("model.pt")
<ipython-input-42-a6b132d869b3> in fit(self,x,epochs,batch_size,class_weight,validation_data,**kwargs)
164 val_loader = None
165 self.trainer = pl.Trainer(max_epochs=epochs,**kwargs)
--> 166 self.trainer.fit(self,val_loader)
167 def predict(self,x):
168 """Run inference on data."""
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self,model,train_dataloader,val_dataloaders,datamodule)
456 )
457
--> 458 self._run(model)
459
460 assert self.state.stopped
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in _run(self,model)
754
755 # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 756 self.dispatch()
757
758 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in dispatch(self)
795 self.accelerator.start_predicting(self)
796 else:
--> 797 self.accelerator.start_training(self)
798
799 def run_stage(self):
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in start_training(self,trainer)
94
95 def start_training(self,trainer: 'pl.Trainer') -> None:
---> 96 self.training_type_plugin.start_training(trainer)
97
98 def start_evaluating(self,trainer: 'pl.Trainer') -> None:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self,trainer)
142 def start_training(self,trainer: 'pl.Trainer') -> None:
143 # double dispatch to initiate the training loop
--> 144 self._results = trainer.run_stage()
145
146 def start_evaluating(self,trainer: 'pl.Trainer') -> None:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
805 if self.predicting:
806 return self.run_predict()
--> 807 return self.run_train()
808
809 def _pre_training_routine(self):
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_train(self)
840 self.progress_bar_callback.disable()
841
--> 842 self.run_sanity_check(self.lightning_module)
843
844 self.checkpoint_connector.has_trained = False
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self,ref_model)
1105
1106 # run eval step
-> 1107 self.run_evaluation()
1108
1109 self.on_sanity_check_end()
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self,on_epoch)
960 # lightning module methods
961 with self.profiler.profile("evaluation_step_and_end"):
--> 962 output = self.evaluation_loop.evaluation_step(batch,batch_idx,dataloader_idx)
963 output = self.evaluation_loop.evaluation_step_end(output)
964
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self,dataloader_idx)
172 model_ref._current_fx_name = "validation_step"
173 with self.trainer.profiler.profile("validation_step"):
--> 174 output = self.trainer.accelerator.validation_step(args)
175
176 # capture any logged information
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self,args)
224
225 with self.precision_plugin.val_step_context(),self.training_type_plugin.val_step_context():
--> 226 return self.training_type_plugin.validation_step(*args)
227
228 def test_step(self,args: List[Union[Any,int]]) -> Optional[STEP_OUTPUT]:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self,*args,**kwargs)
159
160 def validation_step(self,**kwargs):
--> 161 return self.lightning_module.validation_step(*args,**kwargs)
162
163 def test_step(self,**kwargs):
<ipython-input-42-a6b132d869b3> in validation_step(self,batch_idx)
88 """training_step defined the train loop. It is independent of forward"""
89 x,y = batch
---> 90 y_hat = self.model(x).squeeze()
91 y = y.squeeze()
92 if self.output_shape > 1:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self,*input,**kwargs)
887 result = self._slow_forward(*input,**kwargs)
888 else:
--> 889 result = self.forward(*input,**kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self,input)
117 def forward(self,input):
118 for module in self:
--> 119 input = module(input)
120 return input
121
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self,/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self,input)
261
262 def forward(self,input: Tensor) -> Tensor:
--> 263 return self._conv_forward(input,self.weight,self.bias)
264
265
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/conv.py in _conv_forward(self,input,weight,bias)
258 _single(0),self.dilation,self.groups)
259 return F.conv1d(input,bias,self.stride,--> 260 self.padding,self.groups)
261
262 def forward(self,input: Tensor) -> Tensor:
RuntimeError: Expected 3-dimensional input for 3-dimensional weight [128,1024,1],but got 2-dimensional input of size [32,1024] instead
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。