Python: RuntimeError: size mismatch at tensors (PyTorch, Generative Adversarial Network)


Error message: RuntimeError: size mismatch, m1: [64 x 3200], m2: [512 x 1] at C:/w/1/s/windows/pytorch/aten/src\THC/generic/THCTensorMathBlas.cu:290

The code looks like this:

import gzip
import os

import numpy as np
import torch
import torch.nn as nn
from torchvision import datasets, transforms

# `opt` is the argparse namespace defined earlier in acgan.py (not shown):
# it provides n_classes, latent_dim, img_size, channels, batch_size.

class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        self.label_emb = nn.Embedding(opt.n_classes, opt.latent_dim)

        self.init_size = opt.img_size // 4  # Initial size before upsampling
        self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))

        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, noise, labels):
        gen_input = torch.mul(self.label_emb(labels), noise)
        out = self.l1(gen_input)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            """Returns layers of each discriminator block"""
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block

        self.conv_blocks = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )

        # The height and width of the downsampled image
        ds_size = opt.img_size // 2 ** 4

        # Output layers
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
        self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, opt.n_classes), nn.Softmax())

    def forward(self, img):
        out = self.conv_blocks(img)
        out = out.view(out.shape[0], -1)
        validity = self.adv_layer(out)
        label = self.aux_layer(out)

        return validity, label


# Loss functions
adversarial_loss = torch.nn.BCELoss()
auxiliary_loss = torch.nn.CrossEntropyLoss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

os.makedirs("../../data/mnist", exist_ok=True)
labels_path = 'C:/project/PyTorch-GAN/ulna/train-labels-idx1-ubyte.gz'
images_path = 'C:/project/PyTorch-GAN/ulna/train-images-idx3-ubyte.gz'
label_name = []

# Load labels and images from the gzipped idx archives
with gzip.open(labels_path, 'rb') as lbpath:
    labels = np.frombuffer(lbpath.read(), dtype="int32", offset=8)
with gzip.open(images_path, 'rb') as imgpath:
    images = np.frombuffer(imgpath.read(), dtype="int32", offset=16).reshape(len(labels), 70, 70, 1)

hand_transform2 = transforms.Compose([
    transforms.Resize((70, 70)),
    transforms.Grayscale(1),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])

# images = cv2.resize(images, (70, 70), 1)
dataset1 = datasets.ImageFolder('C:/project/PyTorch-GAN/ulna/ulna', transform=hand_transform2)


dataloader = torch.utils.data.DataLoader(
    dataset1,
    batch_size=opt.batch_size,
    shuffle=True,
)
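
To see the sizes involved, one can push a dummy batch through the discriminator's convolutional stack and compare the flattened size with what adv_layer was built to expect; a minimal debugging sketch, assuming opt.channels == 1 and 70x70 inputs:

# Debugging sketch (assumes opt.channels == 1 and 70x70 inputs): compare the
# flattened conv output size with the in_features of adv_layer's Linear.
dummy = torch.zeros(2, 1, 70, 70)
features = discriminator.conv_blocks(dummy)
print(features.shape)                          # e.g. torch.Size([2, 128, 5, 5])
print(features.view(2, -1).shape[1])           # 3200 flattened features per image
print(discriminator.adv_layer[0].in_features)  # what the Linear layer expects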
The traceback is as follows:

Traceback (most recent call last):
  File "acgan.py", line 225, in <module>
    real_pred, real_aux = discriminator(real_imgs)
  File "C:\Users\S\AppData\Local\conda\conda\envs\venv\lib\site-packages\torch\nn\modules\module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "acgan.py", line 110, in forward
    validity = self.adv_layer(out)
  File "C:\Users\S\AppData\Local\conda\conda\envs\venv\lib\site-packages\torch\nn\modules\module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "C:\Users\S\AppData\Local\conda\conda\envs\venv\lib\site-packages\torch\nn\modules\container.py", line 92, in forward
    input = module(input)
  File "C:\Users\S\AppData\Local\conda\conda\envs\venv\lib\site-packages\torch\nn\modules\module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "C:\Users\S\AppData\Local\conda\conda\envs\venv\lib\site-packages\torch\nn\modules\linear.py", line 87, in forward
    return F.linear(input, self.weight, self.bias)
  File "C:\Users\S\AppData\Local\conda\conda\envs\venv\lib\site-packages\torch\nn\functional.py", line 1370, in linear
    ret = torch.addmm(bias, input, weight.t())
RuntimeError: size mismatch, m1: [64 x 3200], m2: [512 x 1] at C:/w/1/s/windows/pytorch/aten/src\THC/generic/THCTensorMathBlas.cu:290
What I am trying to practice is GAN code. The whole GAN code, before my modifications, can be found at the following link: The input images are X-ray images resized to 70x70, and the output is fake X-ray images newly generated by learning from the input X-ray images. The code ran fine when I used the MNIST database. I'm afraid I don't understand what the problem with the code is.
Please help me! Thanks.

It looks like opt.img_size may still be set to 32, as if you were using CIFAR: the m2 shape [512 x 1] matches adv_layer built for img_size = 32, since 128 * (32 // 2**4)**2 = 512. Since you resize to 70, it should be set to 70.
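
In the linked script, opt.img_size comes from an argparse option; a hypothetical sketch of what to check (the flag name and default follow the PyTorch-GAN acgan.py layout and may differ in your copy):

# Hypothetical sketch of the argparse option that defines opt.img_size in acgan.py;
# either change the default or run the script with --img_size 70.
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")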

In any case, another problem will come up, because ds_size = opt.img_size // 2 ** 4 is not valid for opt.img_size = 70. If you want a hard-coded solution, set ds_size = 5. That fixes the discriminator, but the same thing happens in the generator.
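
To see where the two numbers in the error come from, here is a small sketch of the shape arithmetic (each discriminator block is a stride-2 convolution with kernel 3 and padding 1):

# Shape-arithmetic sketch for img_size = 70: trace the feature-map size
# through the four stride-2 convolutions (kernel 3, padding 1).
size = 70
for _ in range(4):                        # four discriminator blocks
    size = (size + 2 * 1 - 3) // 2 + 1    # Conv2d output-size formula
    print(size)                           # 35, 18, 9, 5
print(128 * size ** 2)                    # 3200: what the conv stack really produces
print(128 * (70 // 2 ** 4) ** 2)          # 2048: what adv_layer was built to expect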


If you don't understand how to fix this properly, I suggest you spend some time reading up on how these models work. If you want to use the code as-is, I suggest using an img_size that is a multiple of 16, e.g. opt.img_size = 80, so that you have no problems. To avoid other issues, you probably want to use transforms.Resize((opt.img_size, opt.img_size)) instead of hard-coding the img_size there.
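
A minimal sketch of that suggestion applied to the transform above (assuming opt is the script's argparse namespace):

# Sketch of the suggested fix: pick a multiple of 16 and reuse opt.img_size
# in the transform instead of hard-coding 70.
opt.img_size = 80  # 80 -> 40 -> 20 -> 10 -> 5, and 80 // 2**4 == 5, so shapes agree

hand_transform2 = transforms.Compose([
    transforms.Resize((opt.img_size, opt.img_size)),  # no hard-coded size
    transforms.Grayscale(1),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])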

Please add the full traceback. The problem is probably that you changed the input size and something in the model is hard-coded, but you haven't shared the model's code.
I added it! Thanks.
The traceback is still missing.
Sorry, I've added it. Thank you very much.
I modified the code. However, the error remains: RuntimeError: size mismatch, m1: [64 x 3200], m2: [2048 x 1] at C:/w/1/s/windows/pytorch/aten/src\THC/generic/THCTensorMathBlas.cu:290
@dayday The problem is that with 70, the computation ds_size = opt.img_size // 2 ** 4 is no longer correct.
@dayday I added a simple workaround; otherwise, many other problems would appear, because you are not using a multiple of 16 (70 is not).