Python 如何解决该项目的shpe和重建CNN?

Python 如何解决该项目的shpe和重建CNN?,python,deep-learning,pytorch,conv-neural-network,medical-imaging,Python,Deep Learning,Pytorch,Conv Neural Network,Medical Imaging,当我对这个网络进行医学图像数据培训时 -训练 -温和的 -正常的 -癌症 -试验 -温和的 -正常的 -癌症 -有效的 -温和的 -正常的 -癌症 我在训练时出错 这是数据加载。 导入操作系统 进口火炬 从torchvision导入数据集,转换 ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes from PI

当我对这个网络进行医学图像数据培训时 -训练 -温和的 -正常的 -癌症 -试验 -温和的 -正常的 -癌症 -有效的 -温和的 -正常的 -癌症 我在训练时出错

这是数据加载。 导入操作系统 进口火炬 从torchvision导入数据集,转换

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 32

data_transform_train = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
data_transform_test = transforms.Compose([
    transforms.Resize(234),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

data_dir = '/content/drive/MyDrive/COVID-19  Database/COVID'
train_dir = os.path.join(data_dir, 'train')
valid_dir = os.path.join(data_dir, 'valid')
test_dir = os.path.join(data_dir, 'test')

train_data = datasets.ImageFolder(train_dir, transform=data_transform_train)
valid_data = datasets.ImageFolder(valid_dir, transform=data_transform_test)
test_data = datasets.ImageFolder(test_dir, transform=data_transform_test)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)

loaders_scratch = {
    'train' : train_loader,
    'valid' : valid_loader,
    'test'  : test_loader
}
在这里从头做一个模型

import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(1, 128, 3) #(224-3)/1+1= 222
        self.conv2 = nn.Conv2d(128, 64, 3) #110 after pooling with (2,2) ==>(110-3)/1+1=108
        self.conv3 = nn.Conv2d(64, 64, 3) # 54 after pooling with (2,2) ==> 110/2=54 ==>(54-3)/1+1=52
        self.conv4 = nn.Conv2d(64, 32, 3) # 26 after pooling with (2,2) ==> 52/2=26  ==>(26-3)/1+1=24
        self.conv5 = nn.Conv2d(32, 16, 3) # 12 after pooling with (2,2) ==> 24/2=12 ==> (12-3)/1+1=10
        self.conv6 = nn.Conv2d(16, 8, 3) # 5 after pooling with (2,2) ==> 10/2=2
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(8 * 5 * 5, 160) #8 is a out_channel(number of filter) of last conv layer and 5 is the output of last conv layer after pooling(200 input to fc1)
        self.fc2 = nn.Linear(160, 3) #166 is the output of the fc1 as input to fc2 and 133 output classes
        self.dropout25 = nn.Dropout(p=0.5) # 50% dropout of nodes
        self.softmax = nn.Softmax(dim = 1)
        
    
    def forward(self, x):
        ## Define forward behavior
        x = F.relu(self.conv1(x))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        x = self.pool(F.relu(self.conv6(x)))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.dropout25(x)
        x = self.fc2(x)
        x = self.softmax(x)
        
        return x

#-#-# You so NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

use_cuda = torch.cuda.is_available()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
print(model_scratch)
这里我定义了损失和优化器

import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr = 0.001)
做一次训练,错误我出现在这里

import numpy as np 
def train(n_epochs, loaders, model, optimizer, criterion,use_cuda,save_path):
  """returns trained model"""
  # initialize tracker for maxi validation loss
  valid_loss_min = np.Inf 
    
    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
           # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output,target)
            loss.backward()
            optimizer.step()            
            train_loss += loss.item()*data.size(0)
        ######################    
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            
            output = model(data)
            loss = criterion(output,target)
            
            valid_loss += loss.item()*data.size(0)
        
        train_loss = train_loss/len(loaders['train'].dataset)
        valid_loss = valid_loss/len(loaders['valid'].dataset)
        
        # print training/validation statistics 
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, 
            train_loss,
            valid_loss
            ))
        
        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
            valid_loss_min,
            valid_loss))
            torch.save(model.state_dict(),save_path)
            valid_loss_min = valid_loss
            # return trained model
    return model


# train the model
model_scratch = train(15, loaders_scratch, model_scratch, optimizer_scratch, 
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
将numpy导入为np
def系列(n_时代、装载机、模型、优化器、标准、使用cuda、保存路径):
“”“返回经过训练的模型”“”
#初始化跟踪程序以进行最大验证丢失
有效损失最小值=np.Inf
对于范围内的历元(1,n_历元+1):
#初始化变量以监控培训和验证丢失
列车损耗=0.0
有效损失=0.0
###################
#训练模型#
###################
模型列车()
对于enumerate(装载机['train'])中的批次_idx(数据,目标):
#转到GPU
如果使用_cuda:
data,target=data.cuda(),target.cuda()
##找到损失并相应地更新模型参数
##使用以下方法记录平均训练损失
##列车损失=列车损失+((1/(批次idx+1))*(损失数据-列车损失))
optimizer.zero_grad()
输出=模型(数据)
损失=标准(输出、目标)
loss.backward()
optimizer.step()
列车损失+=损失项目()*数据大小(0)
######################    
#验证模型#
######################
model.eval()
对于枚举(加载程序['valid'])中的批处理_idx,(数据,目标):
#转到GPU
如果使用_cuda:
data,target=data.cuda(),target.cuda()
##更新平均验证损失
输出=模型(数据)
损失=标准(输出、目标)
有效的_loss+=loss.item()*数据大小(0)
列车损失=列车损失/长度(装载机['train'].数据集)
valid_loss=valid_loss/len(装载机['valid'].数据集)
#打印培训/验证统计数据
打印('Epoch:{}\t培训损失:{.6f}\t验证损失:{.6f}'。格式(
纪元
火车失事,
有效损失
))
##TODO:如果验证丢失减少,则保存模型
如果有效,请使用“model\u scratch.pt”(cuda)
69
70#加载获得最佳验证精度的模型
5帧
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py in_conv_forward(自身、输入、重量、偏差)
394对(0),自膨胀,自组)
395返回F.conv2d(输入、重量、偏差、自我步幅、,
-->396自填充、自膨胀、自组)
397
398 def forward(自身,输入:张量)->张量:
RuntimeError:给定的组=1,大小的权重[128,1,3,3],期望输入[32,3,224,224]有1个通道,但得到了3个通道

这是因为您有一个模型定义,其中包含
1个通道
…并且您的
数据集
类包含
3个通道的图像

所以在你的模型中应该写为

import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 128, 3) #(224-3)/1+1= 222
        self.conv2 = nn.Conv2d(128, 64, 3) #110 after pooling with (2,2) ==>(110-3)/1+1=108
        self.conv3 = nn.Conv2d(64, 64, 3)
        .
        .
        .
   
简言之,将self.conv1=nn.Conv2d(1128,3)
转换为这个
self.conv1=nn.Conv2d(3128,3)#(224-3)/1+1=222

编辑:直到你这样做(下面的代码),你的图像仍将在3频道

data_transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                     transforms.ToTensor()])

dataset = ImageFolder(root, transform=data_transform)

因此,上面的代码是必要的,以使
单通道
输入

但我的图像是灰度的,输入通道应该是1!对吗?请参阅my edit@Heshamu虽然您的rom中有黑白格式的图像,但它们仍然有3个通道…默认情况下,最好使用opencv的
imread
方法,并将
0
1
作为第二个参数
data_transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                     transforms.ToTensor()])

dataset = ImageFolder(root, transform=data_transform)