Python: returning 3 images from a data generator


I am trying to pass 3 images through my data generator to my triplet network. I am loading different pairs and stacking them into batches, but I don't know how to return them as 3 separate arrays. I tried adding them to a list, but that did not work either. How can I return them from the data generator?

import os

import numpy as np
import matplotlib.pyplot as plt
import keras
from scipy.misc import imresize  # deprecated in SciPy >= 1.3; skimage.transform.resize is a modern substitute

# IMAGE_DIR, IM_SIZE, BATCH_SIZE, EPOCHS and partition_image are assumed to be
# defined elsewhere in the asker's script.

class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, batch_size=16, dim=(244,244,3), n_channels=3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, Z, y = self.__data_generation(list_IDs_temp)
        return X, Z, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
        # V = np.stack((X, Z), axis=-1)
        # F = np.stack((V, y), axis=-1)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim))
        Z = np.empty((self.batch_size, *self.dim))

        y = np.empty((self.batch_size, *self.dim))

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # Store sample

            image = plt.imread(os.path.join(IMAGE_DIR, ID[0])).astype(np.float32)
            image = imresize(image, (IM_SIZE, IM_SIZE))
            image1 = plt.imread(os.path.join(IMAGE_DIR, ID[1])).astype(np.float32)
            image1 = imresize(image1, (IM_SIZE, IM_SIZE))
            image2 = plt.imread(os.path.join(IMAGE_DIR, ID[2])).astype(np.float32)
            image2 = imresize(image2, (IM_SIZE, IM_SIZE))

            X[i,] = image
            Z[i,] = image1
            y[i,] = image2
        return X, Z, y
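
For context: a keras.utils.Sequence is expected to return a pair (inputs, targets) from __getitem__, optionally plus sample weights. Returning X, Z, y as three separate values therefore makes Keras read them as (inputs, targets, sample_weights), so the model only ever sees one input array. A minimal sketch of the conventional fix, where labels is a hypothetical target array that does not appear in the original post:

    def __getitem__(self, index):
        'Generate one batch of data, grouping the three image batches as inputs'
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        X, Z, y = self.__data_generation(list_IDs_temp)
        # Hypothetical per-sample targets for the triplet-style loss
        labels = np.zeros((self.batch_size, 1))
        return [X, Z, y], labels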

from keras.models import Model, Sequential
from keras.layers import (Input, Conv2D, BatchNormalization, MaxPooling2D,
                          Activation, Dropout, ZeroPadding2D, Dense, Flatten,
                          Lambda, subtract)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
import keras.backend as K

input_a = Input(shape=(224,224,3))
input_b = Input(shape=(224,224,3))
input_c = Input(shape=(224,224,3))

conv = Sequential([
        Conv2D(24, (7, 7), strides=(1,1), input_shape=(224,224,3)),
        BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
        MaxPooling2D((3,3), strides=(2, 2)),
        Activation('relu'),
        Dropout(0.2),

        ZeroPadding2D((2, 2)),
        Conv2D(64, (5, 5), padding='same', strides=(1,1), kernel_initializer='glorot_uniform'),
        BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
        MaxPooling2D((3,3), strides=(2, 2)),
        Activation('relu'),
        Dropout(0.2),

        ZeroPadding2D((1, 1)),
        Conv2D(96, (3,3), padding='same', strides=(1,1),kernel_initializer='glorot_uniform'),
        BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
        MaxPooling2D(pool_size=(2,2), strides=(2,2)),
        Activation('relu'),
        Dropout(0.2),

        ZeroPadding2D((1, 1)),
        Conv2D(96, (3,3), padding='same', strides=(1,1),kernel_initializer='glorot_uniform'),
        BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
        Activation('relu'),
        MaxPooling2D(pool_size=(2,2), strides=(2,2)),
        Dropout(0.2),

        ZeroPadding2D((1, 1)),
        Conv2D(64, (5, 5), padding='same', strides=(1,1), kernel_initializer='glorot_uniform'),
        BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
        Activation('relu', name="activation_1_5"),
        MaxPooling2D((3,3), strides=(2, 2)),
        Dropout(0.2),   
        Dense(256, activation='relu'),
        Flatten()
    ])


net1 = conv(input_a)
net2 = conv(input_b)
net3 = conv(input_c)
# keras.layers.subtract takes a list of two tensors
d1 = subtract([net1, net2])
d2 = subtract([net1, net3])
n1 = norm(d1)
n2 = norm(d2)
out = Activation('sigmoid')(subtract([n2, n1]))
model = Model(inputs=[input_a, input_b, input_c], outputs=out)
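
Note that norm is not defined anywhere in the snippet. A plausible stand-in (an assumption, not taken from the original post) is an L2 norm computed by a Lambda layer, keeping a trailing dimension so the final subtract still has matching shapes:

# Hypothetical definition of the missing `norm` layer: an L2 norm over the
# feature axis; keepdims=True leaves shape (batch, 1) for the final subtract.
norm = Lambda(lambda t: K.sqrt(K.sum(K.square(t), axis=-1, keepdims=True)))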
params = {'dim': (224,224,3),
          'batch_size': BATCH_SIZE,
          'n_channels': 3,
          'shuffle': False}

paramsv = {'dim': (224,224,3),
          'batch_size': BATCH_SIZE,
          'n_channels': 3,
          'shuffle': True}

training_generator = DataGenerator(partition_image['train'], **params)

validation_generator = DataGenerator(partition_image['validation'], **paramsv)
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=1e-6)

filepath = 'weights/weights.{epoch:02d}-{val_loss:.2f}.hdf5'
cpkt1 = ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=True, mode='auto', period=1)
cpkt2 = TensorBoard(log_dir='tensorboard/', histogram_freq=0, write_graph=True, write_images=True)
cpkt3 = EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto')

model.compile(loss="binary_crossentropy", optimizer=opt, metrics=['accuracy'])
model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    steps_per_epoch=int(np.ceil(len(partition_image['train']) / BATCH_SIZE)),
                    validation_steps=int(np.ceil(len(partition_image['validation']) / BATCH_SIZE)),
                    epochs=EPOCHS,
                    shuffle=True,
                    verbose=1,
                    callbacks=[cpkt1, cpkt2, cpkt3])



This fails with:

ValueError: Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 3 array(s), but instead got the following list of 1 arrays: [array([[[[180., 189., 194.],
        [...

There may be other solutions, but what I would do is name the input layers and then feed the inputs as a dictionary keyed by those same names.

So, in your model, you should name your inputs:

input_a = Input(shape=(224,224,3), name="input_a")
input_b = Input(shape=(224,224,3), name="input_b")
input_c = Input(shape=(224,224,3), name="input_c")
Then, in the generator, you have to return something like this:

inputs ={"input_a":X,
         "input_b":Z,
         "input_c":y}
outputs ={"output":o}

return inputs,outputs
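
For the dictionary keys to line up, the model's output layer also needs the matching name, e.g. out = Activation('sigmoid', name="output")(subtract([n2, n1])). Putting it together, the generator's __getitem__ could then look like this (a sketch; o is a hypothetical array of per-sample targets, which the original post never shows):

    def __getitem__(self, index):
        'Generate one batch of data as named inputs and outputs'
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        X, Z, y = self.__data_generation(list_IDs_temp)
        o = np.zeros((self.batch_size, 1))  # hypothetical targets
        inputs = {"input_a": X, "input_b": Z, "input_c": y}
        outputs = {"output": o}
        return inputs, outputs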

You can find a worked example of a generator with multiple inputs here.

Could you show the model structure, the training call, and the training steps? I have added the model and the fit_generator call.