
How do I fix the input shape for this project and rebuild the CNN?


When I train this network on medical image data organized in the following folders, I get an error during training:

- train
  - benign
  - normal
  - cancer
- test
  - benign
  - normal
  - cancer
- valid
  - benign
  - normal
  - cancer

Here is the data-loading code:

import os
import torch
from torchvision import datasets, transforms

### Todo: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 32

data_transform_train = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
data_transform_test = transforms.Compose([
    transforms.Resize(234),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

data_dir = '/content/drive/MyDrive/COVID-19  Database/COVID'
train_dir = os.path.join(data_dir, 'train')
valid_dir = os.path.join(data_dir, 'valid')
test_dir = os.path.join(data_dir, 'test')

train_data = datasets.ImageFolder(train_dir, transform=data_transform_train)
valid_data = datasets.ImageFolder(valid_dir, transform=data_transform_test)
test_data = datasets.ImageFolder(test_dir, transform=data_transform_test)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)

loaders_scratch = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader
}
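
Before defining the model, it helps to confirm what these loaders actually produce. Below is a minimal sanity check (assuming the loaders above were created successfully): it pulls one batch from the training loader and prints its shape, which with ImageFolder and the transforms above comes out as 3-channel tensors.

# sanity check: inspect one batch from the training loader
images, labels = next(iter(loaders_scratch['train']))
print(images.shape)        # expected: torch.Size([32, 3, 224, 224]) -- 3 channels (RGB)
print(labels.shape)        # expected: torch.Size([32])
print(train_data.classes)  # class folders discovered by ImageFolder, e.g. ['benign', 'cancer', 'normal']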

Here I build the model from scratch:

import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### Todo: choose an architecture, and complete the class
    def __init__(self):
        super(Net,self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(1, 128, 3)   # 224 -> (224-3)/1+1 = 222
        self.conv2 = nn.Conv2d(128, 64, 3)  # 222 -> 220, then pooling with (2,2) -> 110
        self.conv3 = nn.Conv2d(64, 64, 3)   # 110 -> 108, pooled -> 54
        self.conv4 = nn.Conv2d(64, 32, 3)   # 54 -> 52, pooled -> 26
        self.conv5 = nn.Conv2d(32, 16, 3)   # 26 -> 24, pooled -> 12
        self.conv6 = nn.Conv2d(16, 8, 3)    # 12 -> 10, pooled -> 5
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(8 * 5 * 5, 160)  # 8 is the out_channels of the last conv layer, 5 x 5 its spatial size -> 200 inputs to fc1
        self.fc2 = nn.Linear(160, 3)          # 160 outputs of fc1 as input, 3 output classes
        self.dropout25 = nn.Dropout(p=0.5)    # 50% dropout of nodes
        self.softmax = nn.Softmax(dim=1)      # note: nn.CrossEntropyLoss below already applies log-softmax internally
        
    
    def forward(self,x):
        ## Define forward behavior
        x = F.relu(self.conv1(x))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        x = self.pool(F.relu(self.conv6(x)))
        x = x.view(x.size(0),-1)
        x = F.relu(self.fc1(x))
        x = self.dropout25(x)
        x = self.fc2(x)
        x = self.softmax(x)
        
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

use_cuda = torch.cuda.is_available()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
print(model_scratch)
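
A quick way to surface shape problems before a full training run is to push a dummy batch with the same shape as the real data through the model. This is only a diagnostic sketch, assuming 3-channel 224x224 inputs as produced by the loaders above; with conv1 declared as nn.Conv2d(1, 128, 3) it raises the same channel-mismatch RuntimeError that shows up later during training.

# diagnostic sketch: forward a dummy batch shaped like the real data
dummy = torch.randn(2, 3, 224, 224)  # 2 samples, 3 channels, 224x224, like the loader output
if use_cuda:
    dummy = dummy.cuda()
try:
    out = model_scratch(dummy)
    print(out.shape)                 # would be torch.Size([2, 3]) if the channel counts matched
except RuntimeError as e:
    print('shape mismatch:', e)      # conv1 expects 1 input channel, but the data has 3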

Here I define the loss function and the optimizer:

import torch.optim as optim

### Todo: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### Todo: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(),lr = 0.001)

When I run the training, this is where the error occurs:

import numpy as np 
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data,target = data.cuda(),target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss,using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output,target)
            loss.backward()
            optimizer.step()            
            train_loss += loss.item()*data.size(0)
        ######################    
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            
            output = model(data)
            loss = criterion(output,target)
            
            valid_loss += loss.item()*data.size(0)
        
        train_loss = train_loss/len(loaders['train'].dataset)
        valid_loss = valid_loss/len(loaders['valid'].dataset)
        
        # print training/validation statistics 
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,train_loss,valid_loss
            ))
        
        ## Todo: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
            valid_loss_min,valid_loss))
            torch.save(model.state_dict(),save_path)
            valid_loss_min = valid_loss
            # return trained model
    return model


# train the model
model_scratch = train(15, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

Here is the error:

RuntimeError                              Traceback (most recent call last)
<ipython-input-4-63f181ccccc5> in <module>()
     66 # train the model
     67 model_scratch = train(15, loaders_scratch, model_scratch, optimizer_scratch,
---> 68                       criterion_scratch, use_cuda, 'model_scratch.pt')
     69 
     70 # load the model that got the best validation accuracy

5 frames
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight, bias)
    394                             _pair(0), self.dilation, self.groups)
    395         return F.conv2d(input, weight, bias, self.stride,
--> 396                         self.padding, self.dilation, self.groups)
    397 
    398     def forward(self,input: Tensor) -> Tensor:

RuntimeError: Given groups=1, weight of size [128, 1, 3, 3], expected input[32, 3, 224, 224] to have 1 channels, but got 3 channels instead

Solution

This happens because your model is defined for 1-channel input, while your dataset yields images with 3 channels. So in your model it should be written as:

import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 128, 3)   # 3 input channels now; 224 -> 222
        self.conv2 = nn.Conv2d(128, 64, 3)  # 222 -> 220, then pooling with (2,2) -> 110
        self.conv3 = nn.Conv2d(64, 64, 3)   # 110 -> 108, pooled -> 54
        .
        .
        .
   

In short, change self.conv1 = nn.Conv2d(1, 128, 3) into self.conv1 = nn.Conv2d(3, 128, 3), so the first convolution expects the 3 channels your images actually have.
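
After this change, a quick dummy-batch check (a sketch, assuming the rest of the class stays as in the question) confirms the rebuilt network accepts 3-channel images:

# sanity check for the rebuilt model: a 3-channel dummy batch now passes conv1
model_fixed = Net()  # Net redefined with nn.Conv2d(3, 128, 3) as the first layer
out = model_fixed(torch.randn(2, 3, 224, 224))
print(out.shape)     # torch.Size([2, 3]) -- one score per class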

EDIT: until you do the following (the code below), your images will still be in 3 channels:

data_transform = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.ToTensor()])

dataset = ImageFolder(root, transform=data_transform)

So the code above is what you need if you want single-channel input (and then the first layer can stay nn.Conv2d(1, 128, 3)).
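
A quick way to confirm the effect (a sketch, assuming the dataset defined just above) is to look at the shape of one sample; with Grayscale(num_output_channels=1) the image tensor has a single channel. Note that this transform alone does not resize the images, so a Resize/CenterCrop step is still needed before batching them for the network.

# check that the grayscale transform really yields 1-channel tensors
img, label = dataset[0]
print(img.shape)  # torch.Size([1, H, W]) -- one channel; H and W are the original image size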
