如何解决输入大小为200的-1,784形状之间的冲突
张量的形状和它的输入大小有误差,这是有冲突的。我完全不知道该怎么办。因为我仍然是这个主题的新手,所以这就是为什么您可能会或可能不会发现根本不需要的代码行的原因。只是抬起头。如果需要,请亲自与我联系,我将向您发送.ipynb
文件
如果有问题,我可以使用MNIST handrawn数字数据集进行计算机视觉。
import torch
import torchvision
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from torch.utils.data import SubsetRandomSampler,DataLoader
dataset = MNIST(root='data/',download=True)
def split_indices(dataset,rate):
eval = int(dataset*rate/100)
index = np.random.permutation(dataset)
return index[eval:],index[:eval]
train_index,eval_index = split_indices(len(dataset),rate=20)
dataset = MNIST(root='data/',train=True,transform=transforms.ToTensor())
train_sampler = SubsetRandomSampler(train_index)
train_dl = DataLoader(dataset,batch_size=200,sampler=train_sampler)
val_sampler = SubsetRandomSampler(eval_index)
val_dl = DataLoader(dataset,sampler=eval_sampler)
inputs = 28*28
nums = 10
model = nn.Linear(inputs,nums)
class MnistModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(inputs,nums)
def forward(self,xb):
xb = xb.reshape(-1,784)
outputs = self.linear(xb)
return outputs
def accuracy(x,y):
print(torch.sum(x == y).item()/len(x))
model = MnistModel()
for images,labels in train_dl:
outputs = model(images)
break
loss_fn = F.cross_entropy
loss = loss_fn(outputs,labels)
opt = torch.optim.Adam(model.parameters(),lr=7)
def loss_batch(model,loss_fn,xb,yb,opt=None,metric=None):
preds = model(xb)
loss = loss_fn(preds,yb)
if opt is not None:
loss.backward()
opt.step()
opt.zero_grad()
metric_result = None
if metric is not None:
metric_result = metric(preds,yb)
return loss.item(),len(xb),metric_result
def evaluate(model,valid_dl,metric=None):
with torch.no_grad():
results = [loss_batch(model,metric=metric) for xb,yb in valid_dl]
losses,nums,metrics = zip(*results)
total = np.sum(nums)
avg_loss = np.sum(np.multiply(losses,nums)) / total
avg_metric = None
if metric is not None:
avg_metric = np.sum(np.multiply(metrics,nums)) / total
return avg_loss,total,avg_metric
def accuracy(outputs,labels):
_,preds = torch.max(outputs,dim=1)
return torch.sum(preds == labels).item() / len(preds)
eval_loss,eval_acc = evaluate(model,val_dl,metric=accuracy)
print(f"loss: {eval_loss},accuracy: {eval_acc*100}")
def fit(epochs,model,opt,train_dl,metric=None):
for epoch in range(epochs):
for xb,yb in train_dl:
loss,_,_ = loss_batch(model,opt)
result = evaluate(model,metric)
eval_loss,eval_metric = result
if metric is None:
print(f"Epoch: {epoch+1},loss: {loss.item()}")
else:
print(f"Epoch: {epoch+1},loss: {loss.item()},metric: {metric.__name__} {eval_metric}")
model = MnistModel()
opt = torch.optim.Adam(model.parameters(),lr=7)
fit(5,eval_dl,accuracy) #Error line
RuntimeError Traceback (most recent call last)
<ipython-input-55-90c5585d3b40> in <module>()
1 opt = torch.optim.Adam(model.parameters(),lr=7)
----> 2 fit(5,accuracy)
3 frames
<ipython-input-49-afd130f584e4> in forward(self,xb)
18
19 def forward(self,xb):
---> 20 xb = xb.reshape(-1,784)
21 outputs = self.linear(xb)
22 return outputs
RuntimeError: shape '[-1,784]' is invalid for input of size 200
解决方法
请勿对焊枪张量使用重塑。
使用torch.nn.Flatten()展平图像。 这在您的程序中看起来将是一致的。
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。