Python ValueError: No gradients provided for any variable when using model.fit


I am trying to train a Keras functional model, using features extracted from two pre-trained models (ResNet and MobileNet) as inputs. I need to classify the images into class 1, 2, or 3 with a softmax layer.

My model.fit call raises the following error:

ValueError: No gradients provided for any variable: ['dense_66/kernel:0', 'dense_66/bias:0',
 'dense_64/kernel:0', 'dense_64/bias:0', 'dense_67/kernel:0', 'dense_67/bias:0', 
'dense_65/kernel:0', 'dense_65/bias:0', 'dense_68/kernel:0', 'dense_68/bias:0', 
'dense_69/kernel:0', 'dense_69/bias:0', 'dense_70/kernel:0', 'dense_70/bias:0'].
Here are the relevant parts of the code:

Creating the dataset

from tensorflow.keras.utils import image_dataset_from_directory

def datasetgenerator(url, BATCH_SIZE, IMG_SIZE):
  # Builds a tf.data.Dataset that yields (image batch, integer label batch) pairs
  data = image_dataset_from_directory(url,
                                      shuffle=True,
                                      batch_size=BATCH_SIZE,
                                      image_size=IMG_SIZE,
                                      label_mode='int')
  return data

BATCH_SIZE = 20
IMG_SIZE = (160, 160)
train_dir = 'wound_dataset2/train'
train_dataset = datasetgenerator(url=train_dir, BATCH_SIZE=BATCH_SIZE, IMG_SIZE=IMG_SIZE)
val_dir = 'wound_dataset2/val'
validation_dataset = datasetgenerator(url=val_dir, BATCH_SIZE=BATCH_SIZE, IMG_SIZE=IMG_SIZE)
test_dir = 'wound_dataset2/test'
test_dataset = datasetgenerator(url=test_dir, BATCH_SIZE=BATCH_SIZE, IMG_SIZE=IMG_SIZE)
print(train_dataset)
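
For reference, a dataset created this way yields (images, labels) batches, so the integer class labels are already available at this stage. A quick sketch of inspecting one batch (the shapes in the comments are what the settings above should produce, not output from the original post):

for image_batch, label_batch in train_dataset.take(1):
    print(image_batch.shape)    # expected: (20, 160, 160, 3)
    print(label_batch.shape)    # expected: (20,) because label_mode='int'
    print(label_batch.numpy())  # integer class indices such as [0 2 1 ...]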
Feature extraction

mobilenet_features = np.empty([20, 1280])
resnet_features = np.empty([20, 2048])
for data in train_dataset:
    image_batch, label_batch = data
    image_batch = data_augmentation(image_batch)
    preprocess_input_image_resnet = preprocess_input_resnet(image_batch)
    preprocess_input_image_mobilenet = preprocess_input_mobilenet(image_batch)
    feature_batch_resnet = base_model_resnet(preprocess_input_image_resnet)
    feature_batch_average_resnet = global_average_layer(feature_batch_resnet)
    feature_batch_mobilenet = base_model_mobilenet(preprocess_input_image_mobilenet)
    feature_batch_average_mobilenet = global_average_layer(feature_batch_mobilenet)
    mobilenet_features = np.concatenate((mobilenet_features, np.array(feature_batch_average_mobilenet)))
    resnet_features = np.concatenate((resnet_features, np.array(feature_batch_average_resnet)))
Model generation

from tensorflow.keras.layers import concatenate, Dense, Dropout, Flatten

# define two sets of inputs
inputA = tf.keras.Input(shape=(1280,))
inputB = tf.keras.Input(shape=(2048,))

# the first branch operates on the first input
x = tf.keras.layers.Dense(8, activation="relu")(inputA)
x = tf.keras.layers.Dense(4, activation="relu")(x)
x = tf.keras.Model(inputs=inputA, outputs=x)

# the second branch operates on the second input
y = tf.keras.layers.Dense(64, activation="relu")(inputB)
y = tf.keras.layers.Dense(32, activation="relu")(y)
y = tf.keras.layers.Dense(4, activation="relu")(y)
y = tf.keras.Model(inputs=inputB, outputs=y)

# combine the output of the two branches
combined = concatenate([x.output, y.output])

fc_layers = [1024, 1024]
dropout = 0.5
# apply a FC layer and then a regression prediction on the
# combined outputs
z = Flatten()(combined)
for fc in fc_layers:
    # New FC layer, random init
    z = Dense(fc, activation='relu')(z) 
    z = Dropout(dropout)(z)
# New softmax layer
predictions = Dense(3, activation='softmax')(z)

# our model will accept the inputs of the two branches and
# then output a single value
model = tf.keras.Model(inputs=[x.input, y.input], outputs=z)
Training

model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss= tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

history = model.fit((mobilenet_features, resnet_features), batch_size=20, epochs=10)

I am trying this as a way to improve accuracy, instead of using plain transfer learning. Any help would be greatly appreciated.

In model = tf.keras.Model(inputs=[x.input, y.input], outputs=z), why are you using a Dropout layer as the final output layer? And in model.fit((mobilenet_features, resnet_features), ...) you are not providing any target tensor.

@TouYou Hi, thanks for your reply. What would my target tensor be in this case? Previously I passed an image dataset, which did not need separate targets. The task is to classify the images into class 1, 2 or 3. Should the target be a label for each input, just a class list like [1, 2, 3], or a label mapping per image? Sorry for the silly question.
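
For concreteness, a minimal sketch of building such a target array is shown below (it is not from the original post; label_list and train_labels are placeholder names). Because the dataset was created with label_mode='int', the target is simply one integer class index per image, collected in the same order as the feature rows during the extraction loop:

import numpy as np

label_list = []
for image_batch, label_batch in train_dataset:
    # ... same feature extraction as in the question ...
    label_list.append(label_batch.numpy())  # integer class indices (0, 1 or 2)

# one label per image, aligned row-for-row with mobilenet_features / resnet_features
train_labels = np.concatenate(label_list)    # shape: (num_samples,)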
z = Flatten()(combined)
z = Dense(1024, activation='relu')(z)   # fc_layers[0]
z = Dropout(dropout)(z)
z = Dense(1024, activation='relu')(z)   # fc_layers[1]
z = Dropout(dropout)(z)

predictions = Dense(3, activation='softmax')(z)

# use the predictions as the output layer
model = tf.keras.Model(inputs=[x.input, y.input], outputs=predictions)

# add a target tensor to the fit call
history = model.fit((mobilenet_features, resnet_features), youTarget, batch_size=20, epochs=10)
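
One detail to keep in mind when building youTarget: the dataset was created with label_mode='int', so the targets are integer class indices rather than one-hot vectors, and the model now ends in a softmax layer. A compile call consistent with that (a sketch, not part of the original answer) could be:

model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              # integer targets -> sparse categorical crossentropy;
              # the output is already a softmax, so from_logits=False
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])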