Python research question (help): RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
Not sure what is happening in my case. Error message:
Traceback (most recent call last):
  File "plot_parametric_pytorch.py", line 127, in <module>
    ops = opfun(X_train[smpl])
  File "plot_parametric_pytorch.py", line 81, in <lambda>
    opfun = lambda X: model.forward(Variable(torch.from_numpy(X)))
  File "/mnt_home/klee/LBSBGenGapSharpnessResearch/vgg.py", line 43, in forward
    x = self.features(x).to(device)
  File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/container.py", line 100, in forward
    input = module(input)
  File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/conv.py", line 349, in forward
    return self._conv_forward(input, self.weight)
  File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/conv.py", line 346, in _conv_forward
    self.padding, self.dilation, self.groups)
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
My full source code is here.
I read this post for some pointers, but I don't know how to put the input tensor on the GPU. I tried doing it with x.cuda(), but I am not sure whether that is correct. I would really like to train my model on the GPU so it runs faster (right now I am only using the CPU, and it is very slow otherwise).
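For reference, my understanding is that moving a NumPy batch onto the GPU usually looks like the minimal sketch below (untested; the shapes and dummy data are placeholders, not from my script):

import numpy as np
import torch

# Pick the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Dummy CIFAR-10-shaped batch (N, C, H, W); placeholder data only
X = np.random.rand(4, 3, 32, 32).astype('float32')

# torch.from_numpy creates a CPU tensor; .to(device) returns a NEW tensor
# on the target device (it does not move the tensor in place)
x = torch.from_numpy(X).to(device)
print(x.device)  # e.g. cuda:0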
[import statements]
cudnn.benchmark = True
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32')
X_train = np.transpose(X_train, axes=(0, 3, 1, 2))
X_test = X_test.astype('float32')
X_test = np.transpose(X_test, axes=(0, 3, 1, 2))
X_train /= 255
X_test /= 255
device = torch.device('cuda:0')
# This is where you can load any model of your choice.
# I stole PyTorch Vision's VGG network and modified it to work on CIFAR-10.
# You can take this line out and add any other network and the code
# should run just fine.
model = vgg.vgg11_bn()
model.to(device)
# Forward pass
opfun = lambda X: model.forward(Variable(torch.from_numpy(X)))  # <------ error originates here
# Forward pass through the network given the input
predsfun = lambda op: np.argmax(op.data.numpy(), 1)
# Do the forward pass, then compute the accuracy
accfun = lambda op, y: np.mean(np.equal(predsfun(op), y.squeeze()))*100
# Initial point
x0 = deepcopy(model.state_dict())
# Number of epochs to train for
# Choose a large value since LB training needs higher values
# Changed from 150 to 30
nb_epochs = 30
batch_range = [25, 40, 50, 64, 80, 128, 256, 512, 625, 1024, 1250, 1750, 2048, 2500, 3125, 4096, 5000]
# parametric plot (i.e., don't train the network)
hotstart = False
if not hotstart:
    for batch_size in batch_range:
        optimizer = torch.optim.Adam(model.parameters())
        model.load_state_dict(x0)
        model.to(device)
        average_loss_over_epoch = '-'
        print('Optimizing the network with batch size %d' % batch_size)
        np.random.seed(1337)  # So that both networks see same sequence of batches
        for e in range(nb_epochs):
            model.eval()
            print('Epoch:', e, ' of ', nb_epochs, 'Average loss:', average_loss_over_epoch)
            average_loss_over_epoch = 0
            # Checkpoint the model every epoch
            torch.save(model.state_dict(), "./models/30EpochC3ExperimentBatchSize" + str(batch_size) + ".pth")
            array = np.random.permutation(range(X_train.shape[0]))
            slices = X_train.shape[0] // batch_size
            beginning = 0
            end = 1
            # Training loop!
            for _ in range(slices):
                start_index = batch_size * beginning
                end_index = batch_size * end
                smpl = array[start_index:end_index]
                model.train()
                optimizer.zero_grad()
                ops = opfun(X_train[smpl])  # <----- error is raised here
                tgts = Variable(torch.from_numpy(y_train[smpl]).long().squeeze())
                loss_fn = F.nll_loss(ops, tgts)
                average_loss_over_epoch += loss_fn.data.numpy() / slices
                loss_fn.backward()
                optimizer.step()
                beginning += 1
                end += 1
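What I think I need is something like the rough sketch below (untested; it reuses the names from my script and assumes the model is already on device). Is this the right idea?

# Sketch: send each NumPy batch to the GPU before the forward pass,
# and bring GPU tensors back to the CPU before calling .numpy()
opfun = lambda X: model(torch.from_numpy(X).to(device))
predsfun = lambda op: np.argmax(op.data.cpu().numpy(), 1)

# ...and inside the training loop, the targets would have to live on the
# same device as the model outputs for F.nll_loss:
tgts = torch.from_numpy(y_train[smpl]).long().squeeze().to(device)
loss_fn = F.nll_loss(ops, tgts)
average_loss_over_epoch += loss_fn.data.cpu().numpy() / slices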
And here is vgg.py, the file from the traceback:

import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math

F = nn.functional

__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]

model_urls = {
    'vgg11': 'https://s3.amazonaws.com/pytorch/models/vgg11-fb7e83b2.pth',
    'vgg13': 'https://s3.amazonaws.com/pytorch/models/vgg13-58758d87.pth',
    'vgg16': 'https://s3.amazonaws.com/pytorch/models/vgg16-82412952.pth',
    'vgg19': 'https://s3.amazonaws.com/pytorch/models/vgg19-341d7465.pth',
}

class VGG(nn.Module):
    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features.cuda()
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 10),
        )
        self._initialize_weights()

    def forward(self, x):
        device = torch.device('cuda:0')
        x.cuda()
        x.to(device)
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return F.log_softmax(x)
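One thing I noticed while reading the docs: Tensor.cuda() and Tensor.to() return a new tensor rather than moving x in place, so the two bare calls in my forward above probably do nothing. Is something like this sketch (untested) the right shape of the fix?

    def forward(self, x):
        # .to() is not in-place: the result must be assigned back to x.
        # Using the device of the model's own parameters avoids
        # hard-coding 'cuda:0' inside forward.
        x = x.to(next(self.parameters()).device)
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return F.log_softmax(x, dim=1)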