Python TypeError: forward() missing 1 required positional argument: 'negative'


I want to use a deep neural network to classify hyperspectral images, but every time I run this code it gives me the error "TypeError: forward() missing 1 required positional argument: 'negative'". The code (still incomplete) is shown below.

To read the data:

import numpy as np
import scipy.io as sio

class DATA():
    # Read the samples and labels and convert them to numpy arrays
    Pavia = sio.loadmat('G:\研究生\Matlab_code\dataset\Classification\paviaU.mat')
    PaviaGT = sio.loadmat('G:\研究生\Matlab_code\dataset\Classification\paviaU_GT.mat')
    # print(sorted(Pavia.keys()))   # returns the keys of the dict
    # print(sorted(PaviaGT.keys()))

    Sample = Pavia['data']
    Sample = np.array(Sample, dtype = np.int32)

    Label = PaviaGT['groundT']
    Label = np.array(Label, dtype = np.int32)

    # Store the size of each sample dimension in a, b, c for later use
    [a,b,c]=Sample.shape

    # Reshape the data into the same layout as in MATLAB
    SampleT = Sample.transpose(1, 0, 2)
    SampleX = SampleT.reshape(-1,103)
    """ sio.savemat('G:\研究生\Sample.mat',{'dataX':SampleX})  """
    LabelT = Label.transpose(1,0)
    Label = LabelT.reshape(-1,1)
    # How to combine samples and labels; the data fed to the network has shape [-1, band]
    """ sio.savemat('G:\研究生\Label.mat',{'LabelX':Label}) """
    totalcount = np.zeros((10,1),dtype = np.int32)
    trainset = []
    testset = []

    # Combine the samples and labels
    def integrated_data(self):
        rebuilddata = []
        for i in range(0,self.a*self.b):
            rebuilddata.append([np.array(self.SampleX[i]),np.array(self.Label[i])])
            for j in range(0,10):
                if self.Label[i] == j:  
                    self.totalcount[j] += 1
        rebuilddata = np.array(rebuilddata)
        return rebuilddata

    # Build the training and test sets
    def make_trainset_and_testset(self, rebuilddata, ratio):
        TrainIndex = []
        TestIndex = []

        # Collect the train/test indices for each class
        for i in range(1,np.max(self.Label)+1):
            class_coor = np.argwhere(self.Label == i)
            index = class_coor[:,0].tolist()
            np.random.shuffle(index)
            VAL_SIZE = int(np.floor(len(index)*ratio))
            ClassTrainIndex = index[:VAL_SIZE]
            ClassTestIndex = index[-VAL_SIZE:]
            TrainIndex += ClassTrainIndex
            TestIndex += ClassTestIndex

        # Return the training and test samples
        TrainSample = rebuilddata[TrainIndex]
        TestSample = rebuilddata[TestIndex]
        return TrainIndex,TestIndex,TrainSample,TestSample
This is my DNN module:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm

class DNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(103, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 9)

    def forward(self,x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.softmax(x,dim=1)
The training and testing functions:

def train(dnn):
    BATCH_SIZE = 100
    EPOCHS = 3
    for epoch in range(EPOCHS):
        for i in tqdm(range(0, len(train_X), BATCH_SIZE)):
            batch_X = train_X[i:i+BATCH_SIZE]
            batch_y = train_y[i:i+BATCH_SIZE]

            dnn.zero_grad()
            outputs = dnn(batch_X)
            loss = loss_function(outputs, batch_y)
            loss.backward()
            optimizer.step()
        print(loss)

def test(net):
    correct = 0
    total = 0
    with torch.no_grad():
        for i in tqdm(range(len(test_X))):
            real_class = torch.argmax(test_y[i]).to(device)
            net_out = dnn(test_X[i].view(-1, 1, 50, 50).to(device))[0]

            predicted_class = torch.argmax(net_out)
            if predicted_class == real_class:
                correct += 1
            total += 1
    print("Accuracy:", round(correct/total,3))

if REBUILD_DATA:
    Data = DATA()
    datay = Data.integrated_data()
    Trainindex, Testindex, TrainSet, TestSet = Data.make_trainset_and_testset(rebuilddata=datay,ratio=0.1)

train_X = torch.Tensor([i[0] for i in TrainSet])
train_y = torch.Tensor([i[1] for i in TrainSet])
train_X = train_X/3000
test_X = torch.Tensor([i[0] for i in TestSet])
test_y = torch.Tensor([i[1] for i in TestSet])

print(train_X[0])
dnn = DNN()
optimizer = optim.SGD(dnn.parameters(), lr = 0.001)
loss_function = nn.TripletMarginLoss()
train(dnn)
You are using nn.TripletMarginLoss() as your loss function.
This particular loss function expects three inputs to compute the loss:

anchor
positive
negative

Your code only passes two arguments. A minimal sketch of the expected call (and of a loss better suited to plain classification) is shown below.
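For illustration only: the tensors, shapes, and the CrossEntropyLoss suggestion below are assumptions and not part of the original post, but they show roughly how each loss is called.

import torch
import torch.nn as nn

# nn.TripletMarginLoss compares three embeddings of the same shape:
# an anchor, a positive example (same class as the anchor) and a
# negative example (different class). All three must be passed in.
triplet_loss = nn.TripletMarginLoss(margin=1.0)
anchor   = torch.randn(100, 128)   # purely illustrative embeddings
positive = torch.randn(100, 128)
negative = torch.randn(100, 128)
loss = triplet_loss(anchor, positive, negative)

# For an ordinary 9-class classification task, nn.CrossEntropyLoss is
# the usual choice. It expects raw logits of shape (N, 9) and integer
# class labels of shape (N,); it applies log-softmax internally, so
# forward() would return self.fc3(x) directly instead of F.softmax(...),
# and the labels would be converted to torch.long.
ce_loss = nn.CrossEntropyLoss()
logits  = torch.randn(100, 9)             # stand-in for the DNN output
targets = torch.randint(0, 9, (100,))     # class indices 0..8
loss = ce_loss(logits, targets)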

You should post the full error traceback.
Thank you, that solved this problem, but I ran into a new one that I still need to fix, since I've only just started with PyTorch.