Python Keras ModelCheckpoint introduces additional layers when saving the model


I am trying to save a model in Keras using ModelCheckpoint. I use the following code snippet to save the model:

import os
import math

import keras
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam

model = load_vgg()
parallel_model = keras.utils.multi_gpu_model(model, gpus=2)
parallel_model.compile(loss="binary_crossentropy", metrics=['accuracy'], optimizer=Adam())
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='min')
checkpoint = ModelCheckpoint(os.path.join(output_dir, "model.h5"), monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
history = parallel_model.fit_generator(train_gen, steps_per_epoch=math.ceil(num_train_samples / batch_size), validation_data=val_gen, validation_steps=math.ceil(num_val_samples / batch_size), epochs=200, verbose=1, class_weight=class_weights, callbacks=[checkpoint, early_stopping])
model.save(os.path.join(output_dir, 'model_2.h5'))
The model is defined with the following code:

from functools import partial

import keras

def load_vgg(in_shape=(x, y), n_classes=1, n_stages_per_blocks=[2, 2, 2, 2, 2]):
  in_layer = keras.layers.Input(in_shape)
  block1 = _block(in_layer, 64, n_stages_per_blocks[0])
  pool1 = keras.layers.MaxPool1D()(block1)
  block2 = _block(pool1, 128, n_stages_per_blocks[1])
  pool2 = keras.layers.MaxPool1D()(block2)
  block3 = _block(pool2, 256, n_stages_per_blocks[2])
  pool3 = keras.layers.MaxPool1D()(block3)
  block4 = _block(pool3, 512, n_stages_per_blocks[3])
  pool4 = keras.layers.MaxPool1D()(block4)
  block5 = _block(pool4, 512, n_stages_per_blocks[4])
  pool5 = keras.layers.MaxPool1D()(block5)
  flattened = keras.layers.Flatten()(pool5)
  dense1 = keras.layers.Dense(2048, activation='relu')(flattened)
  dense2 = keras.layers.Dense(1024, activation='relu')(dense1)
  preds = keras.layers.Dense(n_classes, activation='sigmoid')(dense2)
  model = keras.models.Model(in_layer, preds)
  model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
  return model

convBlock = partial(keras.layers.Conv1D, kernel_size=3, strides=1, padding='same', activation='relu')

def _block(in_tensor, filters, n_convs):
  conv_block = in_tensor
  for _ in range(n_convs):
    conv_block = convBlock(filters=filters)(conv_block)
  return conv_block
Problem: when we load the model saved by ModelCheckpoint and the model saved directly with the save function, they give us different model summaries.

Model summary of the model saved with ModelCheckpoint: (screenshot in the original post)

Model summary of the model saved with the model's save function: (screenshot in the original post)

Why does ModelCheckpoint introduce three additional layers and move the model into a model_1 layer? What changes do I need to make so that the model saved by ModelCheckpoint has the same structure as the one obtained with the save function? Any help would be greatly appreciated. Please let me know if you need any other information.

According to the Keras documentation:

To save the multi-gpu model, use .save(fname) or .save_weights(fname) with the template model (the argument you passed to multi_gpu_model), rather than the model returned by multi_gpu_model.

The same problem occurs when we use ModelCheckpoint: the callback is invoked on the multi-GPU model, so save is called on the wrong model.
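
This also explains the extra layers in the checkpoint: multi_gpu_model wraps the template model (which shows up as the nested model_1 layer) in input-slicing and output-merging layers, and the default callback saves that wrapper graph. A minimal illustration, using the variable names from the question's snippet:

model.save('model.h5')            # correct: saves the plain single-GPU graph
parallel_model.save('model.h5')   # wrong: saves the wrapper graph with the
                                  # extra slice/merge layers and nested model_1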

There are two solutions: 1) implement a version of ModelCheckpoint that takes the template model as an argument (code provided below), or 2) implement a class that ensures any call to the save function goes through the template model (see the sketch right after this paragraph).
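
For option 2, one common sketch (the class name ModelMGPU and the delegation scheme here are illustrative, not part of the Keras API) subclasses Model, adopts the parallel model's internals, and redirects every save/load attribute to the template model:

import keras
from keras.models import Model
from keras.utils import multi_gpu_model

class ModelMGPU(Model):
    def __init__(self, ser_model, gpus):
        # Build the parallel model, then adopt its internals so this
        # object trains like the multi-GPU model.
        pmodel = multi_gpu_model(ser_model, gpus)
        self.__dict__.update(pmodel.__dict__)
        self._smodel = ser_model  # template (serial) model

    def __getattribute__(self, attrname):
        # Route save/save_weights/load/load_weights to the template
        # model, which shares its weights with the parallel model.
        if 'load' in attrname or 'save' in attrname:
            return getattr(self._smodel, attrname)
        return super(ModelMGPU, self).__getattribute__(attrname)

With this wrapper, the built-in ModelCheckpoint can be used unchanged, because its internal call to model.save is transparently redirected to the template model.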

ModelCheckpoint implementation for multi-GPU models:

import keras
import numpy as np 
import warnings

class ModelCheckpoint(keras.callbacks.Callback):
    """Variant of keras.callbacks.ModelCheckpoint that saves the template
    model `ser_model` instead of the multi-GPU model it is attached to."""

    def __init__(self, filepath, ser_model, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False,
                 mode='auto', period=1):
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_save = 0
        self.ser_model = ser_model  # template model passed to multi_gpu_model

        if mode not in ['auto', 'min', 'max']:
            warnings.warn('ModelCheckpoint mode %s is unknown, '
                          'fallback to auto mode.' % (mode),
                          RuntimeWarning)
            mode = 'auto'

        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.Inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.Inf
            else:
                self.monitor_op = np.less
                self.best = np.Inf

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch + 1, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn('Can save best model only with %s available, '
                                  'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                                  ' saving model to %s'
                                  % (epoch + 1, self.monitor, self.best,
                                     current, filepath))
                        self.best = current
                        if self.save_weights_only:
                            self.ser_model.save_weights(filepath, overwrite=True)
                        else:
                            self.ser_model.save(filepath, overwrite=True)
                    else:
                        if self.verbose > 0:
                            print('\nEpoch %05d: %s did not improve from %0.5f' %
                                  (epoch + 1, self.monitor, self.best))
            else:
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
                if self.save_weights_only:
                    self.ser_model.save_weights(filepath, overwrite=True)
                else:
                    self.ser_model.save(filepath, overwrite=True)
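
For completeness, a usage sketch wiring this callback into the question's training code (variables as in the question; the template model is passed as ser_model, while fit_generator still runs on the parallel model):

checkpoint = ModelCheckpoint(os.path.join(output_dir, 'model.h5'),
                             ser_model=model, monitor='val_loss',
                             verbose=1, save_best_only=True, mode='auto')
history = parallel_model.fit_generator(train_gen,
                                       steps_per_epoch=math.ceil(num_train_samples / batch_size),
                                       validation_data=val_gen,
                                       validation_steps=math.ceil(num_val_samples / batch_size),
                                       epochs=200, verbose=1,
                                       class_weight=class_weights,
                                       callbacks=[checkpoint, early_stopping])

The checkpoint file then contains the same graph as model.save, with no additional layers.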
Please feel free to share any comments or suggestions.