Python 3.x: LSTM model loss and accuracy do not change

Tags: python-3.x, artificial-intelligence, pytorch, lstm

Take a look at the LSTM model below.


I am using this model for binary classification of sequences of length 300. The accuracy and loss do not change over several epochs. I have tried changing the number of layers, the number of hidden states, and the activation function, but nothing had any effect. I don't know what I am doing wrong; I am probably missing something basic. Any help is much appreciated.
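For reference, with batch_first=True and input_dim=1 as configured below, the model expects batches shaped (batch, seq_len, features), i.e. (N, 300, 1). A minimal shape check (the batch size of 8 is an arbitrary choice):

import torch

x = torch.randn(8, 300, 1)  # (batch, seq_len=300, input_dim=1)
# model(x) should return an (8, 1) tensor of probabilities for BCELoss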

What data are you training on? Try to overfit the model on two samples, one always classified as 1 and the other as 0. Try replacing the LSTM with a linear layer. The idea is to simplify your model until it trains and works (see the sanity-check sketch after the code listing). @Devstr the dataset is an ECG dataset; I take the data leading up to the class 1 events (which is sometimes longer than the class 1 data itself) as part of class 1, then apply a shift operation to create sequences of length 300 and label them as class 1. Is that why the model is not learning, because there is too much class-0-like data labeled as class 1?
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable

# Create LSTM Model
class LSTMModel(nn.Module):
  def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
    super(LSTMModel, self).__init__()
    # Number of hidden dimensions
    self.hidden_dim = hidden_dim

    # Number of stacked LSTM layers
    self.layer_dim = layer_dim

    # LSTM
    self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True, dropout=0.1)

    # Readout layer
    self.f1 = nn.Linear(hidden_dim, output_dim)

    self.dropout_layer = nn.Dropout(p=0.2)
    # NB: with output_dim == 1, Softmax normalizes over a single value and
    # always returns 1.0, so the output (and hence the loss) cannot change;
    # nn.Sigmoid() is the usual choice for a single-unit binary output
    self.softmax = nn.Softmax(dim=1)

  def forward(self, x):
    # Initialize hidden state with zeros, on the same device as the input
    # (CPU-only tensors here would crash once the model is moved to the GPU)
    h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).to(x.device))

    # Initialize cell state
    c0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).to(x.device))

    # Run the full sequence through the LSTM
    out, (hn, cn) = self.lstm(x, (h0, c0))
    out = self.dropout_layer(hn[-1])  # final hidden state of the last layer
    out = self.f1(out)
    out = self.softmax(out)
    return out
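
# Quick demonstration of the Softmax note above (not part of the original
# code): softmax over a single output unit is identically 1.0, while a
# sigmoid varies with its input.
logits = torch.randn(4, 1)
print(nn.Softmax(dim=1)(logits))  # tensor([[1.], [1.], [1.], [1.]])
print(torch.sigmoid(logits))      # values in (0, 1) that depend on logits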

# LSTM Configuration
batch_size = 3000
num_epochs = 20
learning_rate = 0.001  # Check this learning rate

# Create LSTM
input_dim = 1       # input dimension
hidden_dim = 30     # hidden layer dimension
layer_dim = 15      # number of stacked LSTM layers
output_dim = 1      # output dimension
num_layers = 10     # num_layers (unused below; layer_dim is what the model receives)

print("input_dim = ", input_dim, "\nhidden_dim = ", hidden_dim, "\nlayer_dim = ", layer_dim, "\noutput_dim = ", output_dim)


model = LSTMModel(input_dim, hidden_dim, layer_dim, output_dim)
model.cuda()
error = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
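
# Note (not part of the original code): BCELoss expects probabilities in
# (0, 1). If the final activation is removed from the model, the numerically
# safer nn.BCEWithLogitsLoss is a common alternative:
# error = nn.BCEWithLogitsLoss()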


graph_index = 0
test_loss = []
train_loss = []
plt_test_index = []
plt_train_index = []
tmp_index = []
tmp_train = []
tmp_test = []

# Step counters for the tb logger used below (the logger itself is assumed
# to be initialized elsewhere in the script)
globaliter = 0
globaliter2 = 0

# model.init_hidden()

for epoch in range(num_epochs):

  # Train
  model.train()
  loss_list_train = []
  loss_list_test = []

  total_train = 0
  equals_train = 0
  total_test = 0
  num0_train = 0
  num1_train = 0
  num0_test = 0
  num1_test = 0
  equals_test = 0
  TP_train = 0
  FP_train = 0
  TN_train = 0
  FN_train = 0
  TP_test = 0
  FP_test = 0
  TN_test = 0
  FN_test = 0

  for i, (inputs, targets) in enumerate(train_loader):
    train = Variable(inputs.type(torch.FloatTensor).cuda())
    targets = Variable(targets.type(torch.FloatTensor).cuda())
    optimizer.zero_grad()
    outputs = model(train)
    loss = error(outputs, targets)
    loss_list_train.append(loss.item())
    loss.backward()
    # loss.backward(retain_graph=True)
    optimizer.step()
    t = np.where(targets.cpu().detach().numpy() > 0.5, 1, 0)
    o = np.where(outputs.cpu().detach().numpy() > 0.5, 1, 0) 
    total_train += t.shape[0]
    equals_train += np.sum(t == o)
    num0_train += np.sum(t == 0)
    num1_train += np.sum(t == 1)
    TP_train += np.sum(np.logical_and(t == 1, o == 1))
    FP_train += np.sum(np.logical_and(t == 0, o == 1))  # predicted 1, actually 0
    TN_train += np.sum(np.logical_and(t == 0, o == 0))
    FN_train += np.sum(np.logical_and(t == 1, o == 0))  # predicted 0, actually 1
    tb.save_value('Train Loss', 'train_loss', globaliter, loss.item())
    globaliter += 1
    tb.flush_line('train_loss')
    print(i)

  # Test
  model.eval()
  targets_plot = np.array([])
  outputs_plot = np.array([])
  inputs_plot = np.array([])


  for inputs, targets in test_loader:

    inputs = Variable(inputs.type(torch.FloatTensor).cuda())
    targets = Variable(targets.type(torch.FloatTensor).cuda())  
    outputs = model(inputs)
    loss = error(outputs, targets)
    loss_list_test.append(loss.item())
    #print(outputs.cpu().detach().numpy())
    t = np.where(targets.cpu().detach().numpy() > 0.5, 1, 0)
    o = np.where(outputs.cpu().detach().numpy() > 0.5, 1, 0)
    total_test += t.shape[0]
    equals_test += np.sum(t == o)
    num0_test += np.sum(t == 0)
    num1_test += np.sum(t == 1)
    TP_test += np.sum(np.logical_and(t == 1, o==1))
    FP_test += np.sum(np.logical_and(t == 0, o==1))
    TN_test += np.sum(np.logical_and(t == 0, o==0))
    FN_test += np.sum(np.logical_and(t == 1, o==0))
    tb.save_value('Test Loss', 'test_loss', globaliter2, loss.item())
    globaliter2 += 1
    tb.flush_line('test_loss')


  # Save value in array    
  graph_index += 1 
  plt_train_index.append(graph_index)
  plt_test_index.append(graph_index)
  train_loss.append(np.mean(np.array(loss_list_train)))
  test_loss.append(np.mean(np.array(loss_list_test)))

  print("------------------------------")
  print("Epoch : ", epoch)
  print("----- Train -----")
  print("Total =", total_train, " | Num 0 =", num0_train, " | Num 1 =", num1_train)
  print("Equals =", equals_train)
  print("Accuracy =", (equals_train / total_train)*100, "%")
  # print("TP =", TP_train / total_train, "% | TN =", TN_train / total_train, "% | FP =", FP_train / total_train, "% | FN =", FN_train / total_train, "%")
  print("----- Test -----")
  print("Total =", total_test, " | Num 0 =", num0_test, " | Num 1 =", num1_test)
  print("Equals =", equals_test)
  print("Accuracy =", (equals_test / total_test)*100, "%")