Debugging the PyTorch optimizer
When I run optimizer.step on my code, I get this error:

RuntimeError: sqrt not implemented for 'torch.LongTensor'
C:\Program Files\Anaconda3\lib\site-packages\IPython\core\magic.py in <lambda>(f, *a, **k)
186 # but it's overkill for just that one bit of state.
187 def magic_deco(arg):
--> 188 call = lambda f, *a, **k: f(*a, **k)
189
190 if callable(arg):
C:\Program Files\Anaconda3\lib\site-packages\IPython\core\magics\execution.py in time(self, line, cell, local_ns)
1178 else:
1179 st = clock2()
-> 1180 exec(code, glob, local_ns)
1181 end = clock2()
1182 out = None
<timed exec> in <module>()
C:\Program Files\Anaconda3\lib\site-packages\torch\optim\adam.py in step(self, closure)
98 denom = max_exp_avg_sq.sqrt().add_(group['eps'])
99 else:
--> 100 denom = exp_avg_sq.sqrt().add_(group['eps'])
101
102 bias_correction1 = 1 - beta1 ** state['step']
RuntimeError: sqrt not implemented for 'torch.LongTensor'
Here is the custom loss:
class CustomLoss(_Loss):
    def __init__(self, size_average=True, reduce=True):
        super(CustomLoss, self).__init__(size_average, reduce)

    def forward(self, S, N, M, type='softmax'):
        return self.loss_cal(S, N, M, type)

    def loss_cal(self, S, N, M, type="softmax"):
        self.A = torch.cat([S[i * M:(i + 1) * M, i:(i + 1)]
                            for i in range(N)], dim=0)
        if type == "softmax":
            self.B = torch.log(torch.sum(torch.exp(S.float()), dim=1, keepdim=True) + 1e-8)
            total = torch.abs(torch.sum(self.A - self.B))
        else:
            raise AssertionError("loss type should be softmax or contrast !")
        return total
Finally, here is the main file:
model = LSTM()
optimizer = optim.Adam(list(model.parameters()), lr=LEARNING_RATE)
model = model.to(device)
best_loss = 100.
generator = SpeakerVerificationDataset()
dataloader = DataLoader(generator, batch_size=4,
                        shuffle=True, num_workers=0)
loss_history = []
update_counter = 1
for epoch in range(NUM_EPOCHS):
    print("Epoch # : ", epoch + 1)
    for step in range(STEPS_PER_EPOCH):
        # get batch dataset
        for i_batch, sample_batched in enumerate(dataloader):
            print(sample_batched['MelData'].size())
            inputs = sample_batched['MelData'].view(180, M * N, 40).float()
            print(inputs.size())
            # remove previous gradients
            optimizer.zero_grad()
            # get gradients and loss at this iteration
            predictions = model(inputs)
            w = model.w
            b = model.b
            predictions = similarity(output=predictions, w=w, b=b)
            S = predictions
            loss_func = CustomLoss()
            loss = loss_func.loss_cal(S=S, N=N, M=M)
            loss.backward()
            # update the weights
            print("start optimizing")
            optimizer.step()
            loss_history.append(loss.item())
            print(update_counter, ":", loss_history[-1])
            update_counter += 1
    print()
    # save the weights
    torch.save(model.state_dict(), CHECKPOINT_PATH)
    print("Saving weights")
    print()
The error comes from here:
weight = torch.tensor([10])
bias = torch.tensor([-5])
self.w = nn.Parameter(weight)
self.b = nn.Parameter(bias)
It has to be changed to:
weight = torch.tensor([10.0])
bias = torch.tensor([-5.0])
self.w = nn.Parameter(weight)
self.b = nn.Parameter(bias)
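A minimal sketch (my own addition, not from the original post) of why the integer initialization breaks Adam: torch.tensor infers torch.int64 from a Python list of ints, Adam's second-moment buffer is allocated with torch.zeros_like and therefore inherits the parameter's dtype, and sqrt is only implemented for floating-point tensors:

import torch

w_int = torch.tensor([10])      # dtype inferred as torch.int64 (a LongTensor)
w_float = torch.tensor([10.0])  # dtype inferred as torch.float32

print(w_int.dtype, w_float.dtype)   # torch.int64 torch.float32

# Adam's step computes denom = exp_avg_sq.sqrt().add_(eps); exp_avg_sq is
# created with torch.zeros_like(param), so it inherits the parameter's dtype.
print(w_float.sqrt())               # fine for float tensors
# w_int.sqrt()                      # raises the RuntimeError from the traceback

Note that on more recent PyTorch versions you would likely not even get this far: wrapping an integer tensor in nn.Parameter fails up front, since only floating-point and complex tensors can require gradients.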
Would you mind adding the relevant code? The whole code, or at least the model and the operations leading up to the error?
I basically attached the whole code. I'm not sure which parts are relevant to the Adam optimizer; that's exactly why I'd like to know how to debug this error. Where do I start?
Yes, if you pass a list of ints, it will be initialized as long :D
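As for where to start debugging: one way to narrow this down (a sketch of my own, assuming the model instance from the main file above) is to list every parameter the optimizer will touch whose dtype is not floating point, before constructing the optimizer:

# Adam only supports floating-point parameters, so flag anything else.
# `model` is assumed to be the LSTM instance from the main file.
for name, p in model.named_parameters():
    if not p.is_floating_point():
        print("suspect parameter:", name, p.dtype, tuple(p.shape))

Here this would immediately flag w and b, since nn.Parameter(torch.tensor([10])) yields a torch.int64 parameter.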