如何解决语义分割中的Pytorch运行时错误
我正在尝试在this博客文章中重现语义分段。我做了一些小的调整,但是仍然无法训练模型。我总是收到运行时错误: 给定组= 1,权重为[512,1024,1,1],预期输入[4,512,188,188]具有1024个通道,但得到512个通道 。我试图取消输入的内容,但没有更改。
这是网络
class ConvRelu(nn.Module):
def __init__(self,in_depth,out_depth):
super(ConvRelu,self).__init__()
self.conv = torch.nn.Conv2d(in_depth,out_depth,kernel_size=3,stride=1,padding=1)
self.activation = nn.ReLU(inplace=True)
def forward(self,x):
x = self.conv(x)
x = self.activation(x)
return x
class DecoderBlock(nn.Module,ABC):
def __init__(self,middle_depth,out_depth):
super(DecoderBlock,self).__init__()
self.conv_relu = ConvRelu(in_depth,middle_depth)
self.conv_transpose = nn.ConvTranspose2d(middle_depth,kernel_size=4,stride=2,x):
x = self.conv_relu(x)
x = self.conv_transpose(x)
x = self.activation(x)
return x
class UNetresnet(nn.Module):
def __init__(self,num_classes):
super(UNetresnet,self).__init__()
self.encoder = torchvision.models.resnet101(pretrained=True)
self.pool = nn.MaxPool2d(2,2)
self.conv1 = nn.Sequential(self.encoder.conv1,self.encoder.bn1,self.encoder.relu,self.pool)
self.conv2 = self.encoder.layer1
self.conv3 = self.encoder.layer2
self.conv4 = self.encoder.layer3
self.conv4 = self.encoder.layer4
self.pool = nn.MaxPool2d(2,2)
self.center = DecoderBlock(2048,512,256)
self.dec5 = DecoderBlock(2048 + 256,256)
self.dec4 = DecoderBlock(1024 + 256,256)
self.dec3 = DecoderBlock(512 + 256,256,64)
self.dec2 = DecoderBlock(256 + 64,128,128)
self.dec1 = DecoderBlock(128,32)
self.dec0 = ConvRelu(32,32)
self.final = nn.Conv2d(32,num_classes,kernel_size=1)
def forward(self,x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
pool = self.pool(conv5)
center = self.center(pool)
dec5 = self.dec5(torch.cat([center,conv5],1))
dec4 = self.dec4(torch.cat([dec5,conv4],1))
dec3 = self.dec4(torch.cat([dec4,conv3],1))
dec2 = self.dec4(torch.cat([dec3,conv2],1))
dec1 = self.dec1(dec2)
dec0 = self.dec0(dec1)
return self.final(dec0)```
and here's the training loop
for epoch_idx in range(2):
loss_batches = []
for batch_idx,data in enumerate(tqdm(train_DataLoader,desc = "training")):
imgs = torch.autograd.Variable(data['sat_img'].cuda())
#imgs = imgs.view(1,-1)
#imgs.unsqueeze(0)
#imgs.squeeze()
masks = torch.autograd.Variable(data['map_img'].cuda())
masks.unsqueeze_(0)
#print(imgs.size())
y = unet_resnet(imgs)
loss = cross_entropy_loss(y,masks)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_batches.append(loss.data.cpu().numpy())
print('epoch: ' + str(epoch_idx) + ' training loss: ' + str(np.sum(loss_batches)))
model_file = './unet-' + str(epoch_idx)
unet_resnet = unet_resnet.cpu()
torch.save(unet_resnet.state_dict(),model_file)
unet_resnet = unet_resnet.cuda()
print('model saved') ```
请我需要一些帮助。根据错误消息,错误是在网络中的 conv4 上。我需要在那里更改什么或如何调整输入? 输入的图像是PIL图像
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。