Python: PyTorch and Keras behave differently


I have implemented VGG16 from scratch in both Keras and PyTorch to make sure that the two implementations are exactly the same. I trained it on CIFAR10, and the results are far too different.

Here is my implementation in Keras:

import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Conv2D, ReLU, MaxPool2D, Flatten, Dense
from keras.optimizers import Adam
from keras.utils import np_utils

(x_train,y_train),(x_test,y_test)=cifar10.load_data()
x_train = x_train / 255
x_test = x_test / 255

y_train=np_utils.to_categorical(y_train,10)
y_test=np_utils.to_categorical(y_test,10)

model = Sequential()
model.add(Conv2D(input_shape=(32,32,3),filters=64,kernel_size=(3,3),padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same"))
model.add(ReLU())
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))

model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))

model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))

model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))

model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same"))
model.add(ReLU())
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))

model.add(Flatten())

model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=10, activation="softmax"))

opt = Adam(lr=0.0001, epsilon=1e-06)
model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])

hist = model.fit(x_train, y_train,
    epochs=50,
    batch_size=100,
    validation_data=(x_test, y_test)
    )
And the same in PyTorch:

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

num_epochs = 50
num_classes = 10
batch_size = 100
learning_rate = 0.0001

trans = transforms.ToTensor()
train_dataset = torchvision.datasets.CIFAR10(root="./dataset_pytorch", train=True, download=True, transform=trans)
test_dataset = torchvision.datasets.CIFAR10(root="./dataset_pytorch", train=False, download=True, transform=trans)

train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer5 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc1 = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU())
        self.fc2 = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.ReLU())
        self.fcout = nn.Sequential(
            nn.Linear(4096, 10),
            nn.Softmax(dim=1))  # note: together with CrossEntropyLoss below, this applies softmax twice (see comments)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc1(out)
        out = self.fc2(out)
        out = self.fcout(out)
        return out

def weights_init(m):
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.zeros_(m.bias.data)

model = ConvNet()
model.apply(weights_init)

device = torch.device("cuda:0")#"cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, eps=1e-6)

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    total = 0
    correct = 0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()

        # Run the forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backprop and perform Adam optimisation
        loss.backward()
        optimizer.step()

        # Track the accuracy
        total += labels.size(0)
        _, predicted = torch.max(outputs.data, 1)
        correct += (predicted == labels).sum().item()

    print("Train")
    print('Epoch [{}/{}], Accuracy: {:.2f}%'
          .format(epoch + 1, num_epochs, (correct / total) * 100))


    # Evaluate on the test set (no gradients needed here)
    total_test = 0
    correct_test = 0
    with torch.no_grad():
        for i, (images, labels) in enumerate(test_loader):
            images = images.to(device)
            labels = labels.to(device)

            # Run the forward pass
            outputs = model(images)

            # Track the accuracy
            total_test += labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            correct_test += (predicted == labels).sum().item()
    print("Test")
    print('Epoch [{}/{}], Accuracy: {:.2f}%'
          .format(epoch + 1, num_epochs, (correct_test / total_test) * 100))
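A note on the initialization: the two models already match here, since Keras's defaults for Conv2D and Dense are glorot_uniform kernels (i.e., Xavier uniform) with zero biases, which is exactly what weights_init reproduces in PyTorch. Written out explicitly on the Keras side (a sketch reusing model and Conv2D from the Keras code above; equivalent to the defaults, shown only for clarity):

# Equivalent to the Keras defaults used above: "glorot_uniform" corresponds
# to nn.init.xavier_uniform_ and "zeros" to nn.init.zeros_.
model.add(Conv2D(filters=64, kernel_size=(3,3), padding="same",
                 kernel_initializer="glorot_uniform",
                 bias_initializer="zeros"))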
As you can see, I have adjusted everything so that both networks use the same initializers and the optimizers use the same parameters. Still, the results are far too different, as the plots below show.

Here is another run of the same networks:

I also tried adding some batch normalization after each layer (between the layer and its activation). This alleviates the problem somewhat, but does not solve it completely; the plots are below. A sketch of one such block:
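For reference, this is how one such batch-normalized block looks on the PyTorch side (the placement, convolution then BatchNorm then activation, follows the description above):

import torch.nn as nn

# layer1 from ConvNet above, with batch normalization inserted between
# each convolution and its ReLU activation.
layer1 = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
    nn.BatchNorm2d(64),   # normalizes each of the 64 channels over the batch
    nn.ReLU(),
    nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2))

On the Keras side the equivalent is a BatchNormalization() layer between each Conv2D and its ReLU.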


Is there any reasonable explanation for this behavior, or have I made some mistake in my code?

Just by looking at the plots, the learning rate of the PyTorch model seems too high. It could be that the Adam implementations of Keras and torch differ from each other (so that the same nominal learning rate behaves an order of magnitude differently), or that the weight initializations differ a lot. Why don't you repeat the experiments several times and plot the mean accuracy per epoch?

I thought about that, but the learning rate is already quite low, isn't it? You may be right about the Adam implementations, or there may be some numerical instability in the PyTorch run. I want to be sure there is no bug in my code: do you see any error or difference between the two implementations?

Beriel is right: since you apply a softmax after the output layer, you would have to train on log-probabilities with torch.nn.NLLLoss (i.e., replace the softmax with a LogSoftmax). If you want to keep CrossEntropyLoss, remove the softmax.

Yes, that is the correct answer. Please post it as an answer so I can mark it as accepted.
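For reference, a minimal sketch of that fix, reusing the shapes from ConvNet above: the output head returns raw logits, because nn.CrossEntropyLoss already applies log-softmax and NLL internally; a softmax is only applied when probabilities are actually needed at inference time.

import torch
import torch.nn as nn

# The fixed output head has no Softmax layer: nn.CrossEntropyLoss expects
# raw logits (it is LogSoftmax + NLLLoss in one op).
fcout = nn.Linear(4096, 10)
criterion = nn.CrossEntropyLoss()

logits = fcout(torch.randn(8, 4096))        # raw class scores for a batch of 8
labels = torch.randint(0, 10, (8,))
loss = criterion(logits, labels)            # correct: logits in, not probabilities

# Equivalent NLLLoss formulation, if a log-softmax head is kept instead:
log_probs = nn.LogSoftmax(dim=1)(logits)
loss_nll = nn.NLLLoss()(log_probs, labels)  # same value as loss

# Probabilities only at inference time:
probs = torch.softmax(logits, dim=1)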