如何解决AttributeError: 'tuple' 对象没有属性 'train_dataloader'
我有 3 个文件。在 datamodule
文件中,我创建了数据并使用了 PyTorch Lightning
的基本格式。在 linear_model
中,我基于此 page 创建了一个 linear regression model
。最后,我有一个 train
文件,我正在调用模型并尝试拟合数据。但我收到此错误
GPU available: False,used: False
TPU available: False,using: 0 TPU cores
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py",line 194,in _run_module_as_main
return _run_code(code,main_globals,None,File "/usr/lib/python3.8/runpy.py",line 87,in _run_code
exec(code,run_globals)
File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py",line 10,in <module>
train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),AttributeError: 'tuple' object has no attribute 'train_dataloader'
示例数据模块文件
class DataModuleClass(pl.LightningDataModule):
    """Question's original datamodule (intentionally kept as posted).

    BUG (the subject of the question): setup() returns the tuple
    (self.train_data, self.val_data), so the chained call
    DataModuleClass().setup().train_dataloader() invokes .train_dataloader()
    on a tuple — hence the traceback above:
    AttributeError: 'tuple' object has no attribute 'train_dataloader'.
    """
    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10
        # NOTE(review): calling prepare_data() from __init__ is discouraged;
        # Lightning invokes prepare_data() itself during trainer.fit().
        self.prepare_data()
    def prepare_data(self):
        # Synthetic linear data: y = x + gaussian noise e; features are (x, e).
        x = np.random.uniform(0,10,10)
        e = np.random.normal(0,self.sigma,len(x))
        y = x + e
        X = np.transpose(np.array([x,e]))
        self.x_train_tensor = torch.from_numpy(X).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)
        training_dataset = TensorDataset(self.x_train_tensor,self.y_train_tensor)
        self.training_dataset = training_dataset
    def setup(self):
        data = self.training_dataset
        self.train_data,self.val_data = random_split(data,[8,2])
        # This return is what breaks the chained call in the train file.
        return self.train_data,self.val_data
    def train_dataloader(self):
        return DataLoader(self.train_data)
    def val_dataloader(self):
        return DataLoader(self.val_data)
样本训练文件
from . import datamodule,linear_model
model = linear_model.LinearRegression(input_dim=2,l1_strength=1,l2_strength=1)
trainer = pl.Trainer()
# BUG (as posted): .setup() returns a tuple, so .train_dataloader() is called
# on a tuple; also note the typo `val_dataloaders()` — the datamodule defines
# `val_dataloader()` (singular).
trainer.fit(model,train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),val_dataloaders=datamodule.DataModuleClass().setup().val_dataloaders())
如果您需要更多代码或解释,请告诉我。
更新(根据评论)
现在,根据评论的建议,我从 __init__() 中删除了 self.prepare_data() 的调用,从 setup() 中删除了 return self.train_data,self.val_data,并把 train 文件更改为如下代码,但仍然报错:
data_module = datamodule.DataModuleClass()
trainer = pl.Trainer()
trainer.fit(model,data_module)
解决方法
大多数事情都是正确的,除了少数事情:
def prepare_data(self):
这个函数是正确的,只是它不应该返回任何东西。
还有一件事
def setup(self,stage=None):
需要 stage 变量,如果我们不想在不同的测试和训练阶段之间切换,可以将其设置为默认值 none。
把所有东西放在一起,这是代码:
from argparse import ArgumentParser
import numpy as np
import pytorch_lightning as pl
from torch.utils.data import random_split,DataLoader,TensorDataset
import torch
from torch.autograd import Variable
from torchvision import transforms
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from torch.optim import Adam
from torch.optim.optimizer import Optimizer
class LinearRegression(pl.LightningModule):
    """Linear regression as a LightningModule with optional L1/L2 penalties.

    Args:
        input_dim: number of input features.
        output_dim: number of outputs (1 for scalar regression).
        bias: whether the linear layer has a bias term.
        learning_rate: learning rate handed to the optimizer.
        optimizer: optimizer *class* (not instance), instantiated in
            configure_optimizers().
        l1_strength: weight of the L1 regularizer (0 disables it).
        l2_strength: weight of the L2 regularizer (0 disables it).
    """
    def __init__(
        self,
        input_dim: int = 2,
        output_dim: int = 1,
        bias: bool = True,
        learning_rate: float = 1e-4,
        optimizer: Optimizer = Adam,
        l1_strength: float = 0.0,
        l2_strength: float = 0.0,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.optimizer = optimizer
        self.linear = nn.Linear(
            in_features=self.hparams.input_dim,
            out_features=self.hparams.output_dim,
            bias=bias,
        )

    def forward(self, x):
        y_hat = self.linear(x)
        return y_hat

    def training_step(self, batch, batch_idx):
        x, y = batch
        # flatten any input
        x = x.view(x.size(0), -1)
        y_hat = self(x)
        loss = F.mse_loss(y_hat, y, reduction='sum')
        # L1 regularizer
        if self.hparams.l1_strength > 0:
            l1_reg = sum(param.abs().sum() for param in self.parameters())
            loss += self.hparams.l1_strength * l1_reg
        # L2 regularizer
        if self.hparams.l2_strength > 0:
            l2_reg = sum(param.pow(2).sum() for param in self.parameters())
            loss += self.hparams.l2_strength * l2_reg
        loss /= x.size(0)
        tensorboard_logs = {'train_mse_loss': loss}
        progress_bar_metrics = tensorboard_logs
        return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    # FIX: the original text had a garbled, syntactically invalid signature
    # (`def validation_step(self,y = batch`); Lightning's hook signature is
    # validation_step(self, batch, batch_idx).
    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        y_hat = self(x)
        return {'val_loss': F.mse_loss(y_hat, y)}

    def validation_epoch_end(self, outputs):
        # Average the per-batch validation losses for the epoch.
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_mse_loss': val_loss}
        progress_bar_metrics = tensorboard_logs
        return {'val_loss': val_loss, 'progress_bar': progress_bar_metrics}

    def configure_optimizers(self):
        # self.optimizer is a class; instantiate it here with the stored lr.
        return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)
# Fix the RNG so the synthetic dataset is reproducible; use GPU when present.
np.random.seed(42)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class DataModuleClass(pl.LightningDataModule):
    """Datamodule for a noisy linear dataset (y = x + e), split 8/2 train/val.

    Unlike the question's version, prepare_data() is not called from
    __init__ and setup() returns nothing — Lightning drives both hooks.
    """

    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10

    def prepare_data(self):
        """Generate the synthetic samples and cache them as a TensorDataset."""
        x = np.random.uniform(0, 10, 10)
        e = np.random.normal(0, self.sigma, len(x))
        y = x + e
        features = np.transpose(np.array([x, e]))
        self.x_train_tensor = torch.from_numpy(features).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)
        self.training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)

    def setup(self, stage=None):
        """Split the cached dataset into 8 training and 2 validation samples."""
        self.train_data, self.val_data = random_split(self.training_dataset, [8, 2])

    def train_dataloader(self):
        return DataLoader(self.train_data)

    def val_dataloader(self):
        return DataLoader(self.val_data)
# Pass the datamodule object itself to fit(); Lightning then calls
# prepare_data(), setup(), and the *_dataloader() hooks internally,
# which avoids the tuple-chaining error from the question.
model = LinearRegression(input_dim=2,l1_strength=1,l2_strength=1)
trainer = pl.Trainer()
dummy = DataModuleClass()
trainer.fit(model,dummy)
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。