Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/python/292.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python Pyrotch RNN始终为多变量时间序列提供相同的输出_Python_Deep Learning_Time Series_Pytorch_Rnn - Fatal编程技术网

Python Pyrotch RNN始终为多变量时间序列提供相同的输出

Python Pyrotch RNN始终为多变量时间序列提供相同的输出,python,deep-learning,time-series,pytorch,rnn,Python,Deep Learning,Time Series,Pytorch,Rnn,我的时间序列数据如下所示: 我试图用pytorch中的序列对序列RNN进行建模。它训练得很好,我可以看到损失在减少。但在测试中,它给出了相同的输出,与输入无关 我的模型: class RNNModel(nn.Module): def __init__(self, predictor_size, hidden_size, num_layers, dropout = 0.3, output_size=83): super(RNNModel, self).__init__() se

我的时间序列数据如下所示:

我试图用pytorch中的序列对序列RNN进行建模。它训练得很好,我可以看到损失在减少。但在测试中,它给出了相同的输出,与输入无关

我的模型:

class RNNModel(nn.Module):
    """Sequence-to-sequence GRU regressor.

    Maps an input of shape (seq_len, batch, predictor_size) to a
    prediction of shape (seq_len, batch, output_size).

    Note: the original paste had the methods dedented to module level,
    which is a syntax error / leaves the class empty — they belong
    inside the class body as restored here.
    """

    def __init__(self, predictor_size, hidden_size, num_layers, dropout=0.3, output_size=83):
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        # Inter-layer dropout inside the GRU only applies when num_layers > 1.
        self.rnn = nn.GRU(predictor_size, hidden_size, num_layers=num_layers, dropout=dropout)
        self.decoder = nn.Linear(hidden_size, output_size)
        self.init_weights()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

    def init_weights(self):
        # Small uniform init for the decoder; the GRU keeps its default init.
        initrange = 0.1
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden):
        """Run the GRU and project every time step through the decoder.

        input:  (seq_len, batch, predictor_size)
        hidden: (num_layers, batch, hidden_size)
        Returns (decoded, hidden) with decoded of shape
        (seq_len, batch, output_size).
        """
        output, hidden = self.rnn(input, hidden)
        output = self.drop(output)
        # Flatten (seq_len, batch) so the Linear sees a 2-D tensor, then
        # restore the 3-D shape afterwards.
        decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
        return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden

    def init_hidden(self, batch_size):
        # Zero hidden state allocated with the same dtype/device as the weights.
        weight = next(self.parameters()).data
        return Variable(weight.new(self.num_layers, batch_size, self.hidden_size).zero_())
训练方法:

def train(data_source, lr):
    """Run one training pass over data_source in BPTT-sized chunks.

    Returns the length-weighted mean loss over the pass.  Relies on
    module-level globals: model, bs_train, bptt_size, get_batch,
    criterion, clip, optim, cuda, Variable.
    """
    # Turn on training mode so dropout is active.
    model.train()
    total_loss = 0
    hidden = model.init_hidden(bs_train)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for batch, i in enumerate(range(0, data_source.size(0) - 1, bptt_size)):

        data, targets = get_batch(data_source, i)

        # Starting each batch, detach the hidden state from how it was
        # previously produced so the model doesn't try to backprop all the
        # way to the start of the dataset: graph unrolling only covers the
        # current chunk.
        hidden = Variable(hidden.data)
        if cuda.is_available():
            hidden = hidden.cuda()
        optimizer.zero_grad()

        output, hidden = model(data, hidden)
        loss = criterion(output, targets)
        loss.backward()

        # Clip gradient norm to prevent gradient explosion in the RNN.
        # (clip_grad_norm was renamed clip_grad_norm_ — the old name is
        # deprecated/removed in current PyTorch.)
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)

        optimizer.step()
        # .item() extracts the Python scalar; 0-indexing a 0-dim tensor
        # (the old total_loss[0]) is deprecated.
        total_loss += len(data) * loss.item()

    # BUG FIX: the original `return` sat INSIDE the loop, so only the
    # very first BPTT chunk was ever trained on each call — a likely
    # cause of the "same output for every input" symptom.  Return the
    # accumulated loss after the full pass instead.
    return total_loss / len(data_source)
评价方法:

def evaluate(data_source):
    """Return the length-weighted mean loss over data_source.

    Dropout is disabled and no autograd graph is built.  Relies on
    module-level globals: model, bs_valid, bptt_size, get_batch,
    criterion, cuda, Variable.
    """
    # eval() disables dropout; the original's extra model.train(False)
    # was redundant (it is the same switch).
    model.eval()
    total_loss = 0
    hidden = model.init_hidden(bs_valid)

    # BUG FIX: without no_grad() every chunk builds and retains an
    # autograd graph through the carried hidden state, wasting memory.
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, bptt_size):
            data, targets = get_batch(data_source, i, evaluation=True)

            if cuda.is_available():
                hidden = hidden.cuda()

            output, hidden = model(data, hidden)
            # .item() replaces the deprecated 0-dim indexing (.data ... [0]).
            total_loss += len(data) * criterion(output, targets).item()
            # Detach the carried hidden state between chunks.
            hidden = Variable(hidden.data)

    return total_loss / len(data_source)
训练循环:

# Best validation loss seen so far and the epoch it occurred at;
# updated in place by run().
best_val_loss = None
best_epoch = 0


def run(epochs, lr):
    """Train for `epochs` epochs, checkpointing on best validation loss.

    Returns (num_epochs, val_losses) for plotting.  Relies on
    module-level globals: train, evaluate, train_set, test_set, model,
    torch, best_val_loss, best_epoch.
    """
    val_losses = []
    num_epochs = []
    global best_val_loss
    global best_epoch
    for epoch in range(0, epochs):
        train_loss = train(train_set, lr)
        val_loss = evaluate(test_set)
        num_epochs.append(epoch)
        val_losses.append(val_loss)
        print("Train Loss: ", train_loss, " Validation Loss: ", val_loss)

        # BUG FIX: `not best_val_loss` is also true when the best loss is
        # exactly 0.0, which would re-save on every epoch; test for the
        # None sentinel explicitly.
        if best_val_loss is None or val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), "./4.model.pth")
            best_epoch = epoch
    return num_epochs, val_losses

在这里,无论我作为输入给出的数据点是什么,我总是得到相同的输出。有人能告诉我我做错了什么吗。

我对pytorch不熟悉,所以我只会在更高层次上发表评论。仅根据损失图,模型似乎是“学习”。如果在保存模型之前对模型进行评估,并且在完成培训之后,您是否获得了预期的性能?如果是这样的话,那么问题就出在代码的推理部分,否则问题就出在代码的训练部分。不,即使我在训练之后测试它,我也会得到同样的结果。我想不出这一点。如果您的学习率太高,模型可以根据总体平均值进行自我训练。我看不出您在上面设置了
lr
,如果可能的话,您能试着降低它吗?
# Inference: rebuild the model and load the checkpoint saved by run().
# Relies on module-level globals: predictor_size, hidden_size, num_layers,
# dropout_pct, output_size, var.
model = RNNModel(predictor_size, hidden_size, num_layers, dropout_pct, output_size)
model.load_state_dict(torch.load("./4.model.pth"))

if cuda.is_available():
    model.cuda()

# Disable dropout for inference; model.train(False) is redundant with eval().
model.eval()
model.train(False)
# Fresh zero hidden state for a batch of one sequence.
hidden = model.init_hidden(1)
# NOTE(review): presumably var[105] is one time step of predictors —
# confirm against how var is built.
inp = torch.Tensor(var[105])
# Reshape to (seq_len=1, batch=1, predictor_size).  volatile=True disabled
# autograd in old PyTorch; it is deprecated (torch.no_grad() is the
# modern equivalent).
input = Variable(inp.contiguous().view(1,1,predictor_size), volatile=True)
if cuda.is_available():
    input.data = input.data.cuda()
output, hidden = model(input, hidden)
op = output.squeeze().data.cpu()
print(op)