
Python 3.x Keras model error: TypeError: load_weights() missing 1 required positional argument: 'filepath'


I am running Keras with TensorFlow 2.0 and Python 3.7, and I am trying to load weights:

if weights:
    disc.load_weights(filepath='autoencoder_experiments/20171218-101804/weights/autoencoder.h5', by_name=True)
    genr.load_weights(filepath='autoencoder_experiments/20171218-101804/weights/autoencoder.h5', by_name=True)

But it fails with this error:

TypeError: load_weights() missing 1 required positional argument: 'filepath'

This is the function where the error occurs:

def train(BATCH_SIZE=32, disc=None, genr=None, original_model_name=None, weights=False):
    directory = os.path.join("experiments", datetime.now().strftime("%Y%m%d-%H%M%S"))
    if not os.path.exists(directory):
        # create the folder
        os.makedirs(directory)
        os.makedirs(directory + "/model")

    hdlr = logging.FileHandler(os.path.join(directory, 'output.log'))
    logger.addHandler(hdlr)

    logger.debug(directory)
    if original_model_name is not None:
        logger.debug("MORE TRAINING on the model %s" % original_model_name)

    # load dataset
    latent_dim = 20
    maxlen = 15
    n_samples = 25000
    data_dict = __build_dataset(maxlen=maxlen, n_samples=int(n_samples + n_samples * 0.33))
    X_train = data_dict['X_train']

    print("Training set shape %s" % (X_train.shape,))

    # models
    if disc is None:
        disc = discriminator_model(print_fn=logger.debug)
        plot_model(disc, to_file=os.path.join(directory, "discriminator.png"), show_shapes=True)
    if genr is None:
        genr = generator_model(print_fn=logger.debug)
        plot_model(genr, to_file=os.path.join(directory, "generator.png"), show_shapes=True)

    if weights:
        disc.load_weights(filepath='autoencoder_experiments/20171218-101804/weights/autoencoder.h5', by_name=True)
        genr.load_weights(filepath='autoencoder_experiments/20171218-101804/weights/autoencoder.h5', by_name=True)

.......
And here is the function that builds the 'disc' object (its code is included at the end of this post).


How can I solve this problem?

The fact that it says "required positional argument" leads me to believe it would work if you simply dropped filepath= and passed the path positionally. Are you mixing keras and tf.keras? This looks like a version mismatch.
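
Based on that suggestion, a minimal sketch of the change (the paths are the ones from the question; whether dropping the keyword alone resolves the error is the commenter's assumption, not something confirmed in the thread):

if weights:
    # Pass the path positionally instead of as the filepath= keyword,
    # since the TypeError complains about a missing positional argument.
    disc.load_weights('autoencoder_experiments/20171218-101804/weights/autoencoder.h5', by_name=True)
    genr.load_weights('autoencoder_experiments/20171218-101804/weights/autoencoder.h5', by_name=True)

For reference, the asker's discriminator_model() function, which builds the 'disc' object, follows.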
def discriminator_model(summary=True, print_fn=None):
    """
    Discriminator model takes a 3D tensor of size (batch_size, timesteps, word_index), outputs a domain embedding tensor of size (batch_size, lstm_vec_dim).
    :param summary: set to True to have a summary printed to output and a plot to file at "images/discriminator.png"
    :param print_fn: optional function used to print the summary lines (passed to model.summary, e.g. logger.debug)
    :return: Discriminator model
    """
    dropout_value = 0.5
    cnn_filters = [20, 10]
    cnn_kernels = [2, 3]
    cnn_strides = [1, 1]
    enc_convs = []
    embedding_vec = 20  # length of the embedding layer
    leaky_relu_alpha = 0.2
    timesteps = 15
    word_index = 38
    latent_vector = 20

    discr_inputs = Input(shape=(timesteps, word_index),
                         name="Discriminator_Input")
    # embedding layer. expected output ( batch_size, timesteps, embedding_vec)
    # manual_embedding = Dense(embedding_vec, activation='linear', name="manual_embedding")
    # discr = TimeDistributed(manual_embedding, name='embedded', trainable=False)(discr_inputs)
    # discr = Embedding(word_index, embedding_vec, input_length=timesteps, name="discr_embedd")(
    #     discr_inputs)  # other embedding layer
    for i in range(2):
        conv = Conv1D(cnn_filters[i],
                      cnn_kernels[i],
                      padding='same',
                      strides=cnn_strides[i],
                      name='discr_conv%s' % i)(discr_inputs)
        conv = BatchNormalization()(conv)
        conv = LeakyReLU(alpha=leaky_relu_alpha)(conv)
        conv = Dropout(dropout_value, name='discr_dropout%s' % i)(conv)
        conv = AveragePooling1D()(conv)
        enc_convs.append(conv)

    # concatenating CNNs. expected output (batch_size, 7, 30)
    discr = concatenate(enc_convs)
    # discr = Flatten()(discr)
    discr = LSTM(latent_vector)(discr)
    # discr = Dropout(dropout_value)(discr)
    discr = Dense(1, activation='sigmoid',
                  kernel_initializer='normal'
                  )(discr)

    D = Model(inputs=discr_inputs, outputs=discr, name='Discriminator')

    if summary:
        if print_fn:
            D.summary(print_fn=print_fn)
        else:
            D.summary()
            # plot_model(D, to_file="images/discriminator.png", show_shapes=True)
    return D
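
To check the commenter's hypothesis that keras and tf.keras are being mixed, a small diagnostic sketch may help (disc is whatever model object load_weights() is called on; the module strings mentioned in the comments are only what the two libraries typically report, so treat them as an assumption):

import inspect
import tensorflow as tf

print(tf.__version__)             # e.g. 2.0.0
# The module of the model's class reveals which Keras built it:
# 'tensorflow.python.keras...' usually means tf.keras,
# 'keras...' usually means the standalone keras package.
print(type(disc).__module__)
# The bound signature shows whether this build names the argument 'filepath'
# and whether it accepts it as a keyword.
print(inspect.signature(disc.load_weights))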