Neural network RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.DoubleTensor for argument #2 'weight'
我一直在尝试重新训练一个模型，但不幸的是，在过去的两天里一直遇到同一个错误，你能帮忙吗？初始代码如下：
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
data_dir = 'flowers'
train_dir = data_dir + '/train'
data_dir = 'flowers'
train_transforms = transforms.Compose([transforms.Resize(224),
transforms.RandomResizedCrop(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# Load a pretrained model
model = models.vgg16(pretrained=True)
# Keep the parameters the same
for param in model.parameters():
param.requires_grad = False
# and final output 102, since tht we have 102 flowers.
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, 4096)),
('relu', nn.ReLU()),
('fc3', nn.Linear(4096, 102)),
('output', nn.LogSoftmax(dim=1))
]))
# Replace model's old classifier with the new classifier
model.classifier = classifier
# Calculate the loss
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to('cuda')
epochs = 1
print_every = 40
steps = 0
for e in range(epochs):
running_loss = 0
model.train()
# model = model.double()
for images, labels in iter(trainloader):
steps += 1
images.resize_(32, 3, 224, 224)
inputs = Variable(images.to('cuda'))
targets = Variable(labels.to('cuda'))
optimizer.zero_grad()
# Forward and backward passes
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
#running_loss += loss.data[0]
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
数据集加载代码（与上文相同）：
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
data_dir = 'flowers'
train_dir = data_dir + '/train'
data_dir = 'flowers'
train_transforms = transforms.Compose([transforms.Resize(224),
transforms.RandomResizedCrop(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# Load a pretrained model
model = models.vgg16(pretrained=True)
# Keep the parameters the same
for param in model.parameters():
param.requires_grad = False
# and final output 102, since tht we have 102 flowers.
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, 4096)),
('relu', nn.ReLU()),
('fc3', nn.Linear(4096, 102)),
('output', nn.LogSoftmax(dim=1))
]))
# Replace model's old classifier with the new classifier
model.classifier = classifier
# Calculate the loss
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to('cuda')
epochs = 1
print_every = 40
steps = 0
for e in range(epochs):
running_loss = 0
model.train()
# model = model.double()
for images, labels in iter(trainloader):
steps += 1
images.resize_(32, 3, 224, 224)
inputs = Variable(images.to('cuda'))
targets = Variable(labels.to('cuda'))
optimizer.zero_grad()
# Forward and backward passes
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
#running_loss += loss.data[0]
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
使用预训练模型并仅训练分类器的尝试（代码与上文相同）：
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
data_dir = 'flowers'
train_dir = data_dir + '/train'
data_dir = 'flowers'
train_transforms = transforms.Compose([transforms.Resize(224),
transforms.RandomResizedCrop(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# Load a pretrained model
model = models.vgg16(pretrained=True)
# Keep the parameters the same
for param in model.parameters():
param.requires_grad = False
# and final output 102, since tht we have 102 flowers.
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, 4096)),
('relu', nn.ReLU()),
('fc3', nn.Linear(4096, 102)),
('output', nn.LogSoftmax(dim=1))
]))
# Replace model's old classifier with the new classifier
model.classifier = classifier
# Calculate the loss
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to('cuda')
epochs = 1
print_every = 40
steps = 0
for e in range(epochs):
running_loss = 0
model.train()
# model = model.double()
for images, labels in iter(trainloader):
steps += 1
images.resize_(32, 3, 224, 224)
inputs = Variable(images.to('cuda'))
targets = Variable(labels.to('cuda'))
optimizer.zero_grad()
# Forward and backward passes
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
#running_loss += loss.data[0]
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
触发错误的完整代码（与上文相同）：
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
data_dir = 'flowers'
train_dir = data_dir + '/train'
data_dir = 'flowers'
train_transforms = transforms.Compose([transforms.Resize(224),
transforms.RandomResizedCrop(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# Load a pretrained model
model = models.vgg16(pretrained=True)
# Keep the parameters the same
for param in model.parameters():
param.requires_grad = False
# and final output 102, since tht we have 102 flowers.
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, 4096)),
('relu', nn.ReLU()),
('fc3', nn.Linear(4096, 102)),
('output', nn.LogSoftmax(dim=1))
]))
# Replace model's old classifier with the new classifier
model.classifier = classifier
# Calculate the loss
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to('cuda')
epochs = 1
print_every = 40
steps = 0
for e in range(epochs):
running_loss = 0
model.train()
# model = model.double()
for images, labels in iter(trainloader):
steps += 1
images.resize_(32, 3, 224, 224)
inputs = Variable(images.to('cuda'))
targets = Variable(labels.to('cuda'))
optimizer.zero_grad()
# Forward and backward passes
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
#running_loss += loss.data[0]
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.DoubleTensor for argument #2 'weight'
如果你想在 PyTorch 中使用 GPU，必须确保 (1) 整个工作流程（即你的模型）和数据都已转移到 CUDA 设备上。这里的“整个工作流程”包括预训练模型、criterion（损失函数）、classifier（分类器）以及输入数据，它们都应分配到 CUDA 设备。
(2) 如果仅调用 model.cuda() 不能保证这一点，则可能需要对所有对象手动执行该操作，以确保模型内部权重和输入数据都是 CUDA 上的浮点类型。
希望能有所帮助。
你应该把 inputs（已移到 CUDA 设备上的张量）传入网络，但你实际传入的却是 images（仍在 CPU 上）。
—— 你能提供更多关于堆栈跟踪（traceback）的信息吗？ —— 我已经添加了全部代码，这有帮助吗？ —— 没有，代码具体在哪一行出错？你能明确写出 model = model.to('cuda') 吗？ —— 我现在试试。