Python: How to feed 2 different datasets to 2 different CNNs


Hi everyone, I'm new to Keras and I've run into a problem. I've figured out how to combine the two CNN models, but I can't feed the datasets into the combined model. Can anyone help me?

Here is my code:

# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 08:20:24 2019

@author: TECHFEA
"""

from keras import applications
from keras.layers import GlobalAveragePooling2D, Dense,Flatten,Conv2D,MaxPooling2D,Add,Input
from keras.layers import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import log_loss
from keras.models import Model
from keras.optimizers import SGD
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
import matplotlib.pyplot as plt
from keras.models import load_model
from scipy import interp
from itertools import cycle
from glob import glob
from keras.optimizers import Adam


train_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/train/"
validation_path ="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_orj/validation/"

train_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/train_lbp/"
validation_path2="C:/Users/Monster/Desktop/furkan_ecevit/Datasets/fer_lbp/validation_lbp/"

className = glob(train_path + "*/")
numberOfClass = len(className)
batch_size=32

train_datagen = ImageDataGenerator(rescale= 1./255,
                                   vertical_flip=False,
                                   horizontal_flip=True)



validation_datagen = ImageDataGenerator(rescale = 1./255)

train_generator = train_datagen.flow_from_directory(train_path, target_size =(72,72),
                                                    batch_size = batch_size,
                                                    color_mode = "rgb",
                                                    class_mode = "categorical")

validation_generator = validation_datagen.flow_from_directory(validation_path, target_size =(72,72),
                                                    batch_size = 10,
                                                    color_mode = "rgb",
                                                    class_mode = "categorical")
train_generator2 = train_datagen.flow_from_directory(train_path2, target_size =(72,72),
                                                    batch_size = batch_size,
                                                    color_mode = "rgb",
                                                    class_mode = "categorical")

validation_generator2 = validation_datagen.flow_from_directory(validation_path2, target_size =(72,72),
                                                    batch_size = 10,
                                                    color_mode = "rgb",
                                                    class_mode = "categorical")




base_model1 = applications.VGG16(weights='imagenet', include_top=False, input_shape=(72,72,3))
base_model1.summary()
x1=base_model1.output
x1=Flatten()(x1)
x1=Dense(100,activation='relu')(x1)
model1 = Model(inputs=base_model1.input, outputs=x1)
model1.summary()

input_shallow = Input(shape = (72,72,3))

conv1 = Conv2D(16, (3,3), activation = 'relu', padding = "same")(input_shallow)
pool1 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv1)
conv2 = Conv2D(32, (3,3), activation = 'relu', padding = "same")(pool1)
pool2 = MaxPooling2D(pool_size = (2,2), strides = 2)(conv2)
flat1=Flatten()(pool2)

dense_1=Dense(100,activation='relu')(flat1)

model2=Model(inputs=input_shallow,outputs=dense_1)
model2.summary()

mergedOut = Add()([model1.output,model2.output])
out=Dense(2048, activation='relu')(mergedOut) 
out = Dense(7, activation='softmax', name='predictions')(out)

model = Model(inputs=[model1.input,model2.input], outputs=out)
model.summary()


opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)

model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])



# This call is where the error below is raised: fit_generator expects a single
# generator, not a tuple of two DirectoryIterators.
hist = model.fit_generator(
        generator=(train_generator,train_generator2),
        steps_per_epoch = 10,
        epochs=16,
        validation_data =(validation_generator,validation_generator2),
        validation_steps = 2,
        shuffle=True)

Here is what I want to do with the images:


Here is the error I get: 'DirectoryIterator' object has no attribute 'ndim'


Can you show the full error message you are getting?
This is the full error message. I can see the old imgur link, but not the new one.
You need one generator; you can't have two. You have to combine the generators, like this:
@Daniel Möller I already tried that, but it doesn't work :(
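
For reference, here is a minimal sketch of what "combining the generators" can look like: a plain Python generator that pulls one batch from each DirectoryIterator and yields them as the two-input list the merged model expects. The names (train_generator, train_generator2, model, ...) are the ones from the question above; it is assumed that both directory trees contain corresponding images in the same order (for example by passing the same seed, or shuffle=False, to both flow_from_directory calls), otherwise the original and LBP batches will not line up.

def combined_generator(gen1, gen2):
    # Yield ([batch_from_gen1, batch_from_gen2], labels) indefinitely,
    # which is the format a two-input Keras model expects.
    while True:
        x1, y1 = next(gen1)
        x2, _ = next(gen2)   # labels are assumed identical in both datasets
        yield [x1, x2], y1

train_combined = combined_generator(train_generator, train_generator2)
validation_combined = combined_generator(validation_generator, validation_generator2)

hist = model.fit_generator(
        generator=train_combined,
        steps_per_epoch=10,
        epochs=16,
        validation_data=validation_combined,
        validation_steps=2)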