Deep learning: custom model uses too much memory during training

I am re-implementing the ResNet model in PyTorch. Everything works fine except that the model uses a huge amount of memory during training and causes an OOM (out-of-memory) error. I tried to debug and saw that GPU memory jumps from 4 GB to 11 GB after calling optimizer.step(). When I replace my custom model with the official ResNet model from PyTorch, the OOM error does not occur, even though my custom model has fewer parameters than the official one (23M < 25M). If you have tried re-implementing models in PyTorch, could you help me find the mistake I made? The full code of my re-implementation is below. Any help is appreciated, thanks!
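First, this is roughly how I observed the memory jump (a minimal sketch, assuming a standard CUDA training loop; report_memory is just a throwaway debugging helper, not part of the model):

import torch

def report_memory(tag):
    # memory_allocated() counts tensors currently held on the GPU;
    # memory_reserved() also includes cache held by PyTorch's allocator
    allocated = torch.cuda.memory_allocated() / 1024 ** 3
    reserved = torch.cuda.memory_reserved() / 1024 ** 3
    print(f"{tag}: {allocated:.2f} GiB allocated, {reserved:.2f} GiB reserved")

# In the training loop:
#     loss.backward()
#     report_memory("before optimizer.step()")  # ~4 GB with my model
#     optimizer.step()
#     report_memory("after optimizer.step()")   # ~11 GB with my model

And here is the model itself: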

import torch.nn as nn
from torch.nn import Conv2d, BatchNorm2d, LeakyReLU, MaxPool2d, ModuleList

# Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, with an optional
# projection (down_sampler) on the skip connection
class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels, down_sampler=None, stride=1):
        super(ConvBlock, self).__init__()
        self.conv1 = Conv2d(in_channels, out_channels // 4, kernel_size=1, stride=1)
        self.batch_norm1 = BatchNorm2d(out_channels // 4)
        self.relu1 = LeakyReLU()

        self.conv2 = Conv2d(out_channels // 4, out_channels // 4, kernel_size=3, stride=stride, padding=1)
        self.batch_norm2 = BatchNorm2d(out_channels // 4)
        self.relu2 = LeakyReLU()

        self.conv3 = Conv2d(out_channels // 4, out_channels, kernel_size=1, stride=1)
        self.batch_norm3 = BatchNorm2d(out_channels)
        self.relu3 = LeakyReLU()

        self.down_sampler = down_sampler

    def forward(self, x):
        identity = x
        x = self.relu1(self.batch_norm1(self.conv1(x)))
        x = self.relu2(self.batch_norm2(self.conv2(x)))
        x = self.batch_norm3(self.conv3(x))
        if self.down_sampler is not None:
            identity = self.down_sampler(identity)
        x = x + identity
        x = self.relu3(x)
        return x


# One ResNet stage: the first block projects/downsamples the input, the remaining
# blocks keep the shape
class ConvGroup(nn.Module):
    def __init__(self, in_channels, out_channels, number_of_conv=1, stride=1):
        super(ConvGroup, self).__init__()
        layers = []
        down_sampler = nn.Sequential(Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
                                     nn.BatchNorm2d(out_channels))
        layers.append(ConvBlock(in_channels, out_channels, down_sampler, stride=stride))
        for i in range(number_of_conv - 1):
            layers.append(ConvBlock(out_channels, out_channels, down_sampler=None))
        self.conv_group = nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv_group(x)
        return x


# ResNet backbone that returns the feature maps of all four stages (c2..c5)
class ResNet(nn.Module):
    R_50 = [3, 4, 6, 3]
    R_101 = [3, 4, 23, 3]
    R_152 = [3, 8, 36, 3]
    STRIDES = [1, 2, 2, 2]

    def __init__(self, type=R_50):
        super(ResNet, self).__init__()
        self.layers = ModuleList()
        self.conv1 = Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3)
        self.max_pool = MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.relu = LeakyReLU()
        self.output_channels = 256
        in_channels = 64
        out_channels = 256
        for i in range(len(type)):
            self.layers.append(
                ConvGroup(in_channels=in_channels, out_channels=out_channels, number_of_conv=type[i],
                          stride=ResNet.STRIDES[i]))
            in_channels = out_channels
            out_channels = out_channels * 2

    def forward(self, x):
        x = self.conv1(x)
        x = self.max_pool(x)
        x = self.relu(x)
        c2 = self.layers[0](x)
        c3 = self.layers[1](c2)
        c4 = self.layers[2](c3)
        c5 = self.layers[3](c4)
        return c5, c4, c3, c2
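For reference, this is how the parameter counts and the forward pass can be checked (a small sketch, assuming torchvision is installed; resnet50 is the official model I compared against):

import torch
from torchvision.models import resnet50

model = ResNet()  # defaults to the ResNet-50 configuration [3, 4, 6, 3]

print(sum(p.numel() for p in model.parameters()))       # ~23M (my model)
print(sum(p.numel() for p in resnet50().parameters()))  # ~25M (official ResNet-50)

# Sanity check of the forward pass on a dummy batch
x = torch.randn(1, 3, 224, 224)
c5, c4, c3, c2 = model(x)
print(c2.shape, c3.shape, c4.shape, c5.shape)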