Keras 我得到这个错误';InvalidArgumentError:输入必须是四维[][Op:Conv2D]';即使我';m发送批次(大小=1)作为输入

Keras 我得到这个错误';InvalidArgumentError:输入必须是四维[][Op:Conv2D]';即使我';m发送批次(大小=1)作为输入,keras,tensorflow2.0,generative-adversarial-network,Keras,Tensorflow2.0,Generative Adversarial Network,这是我的代码:我无法理解我的代码的哪一部分出错了(发送3维输入而不是批处理),我在pix2pix代码中加入了感知损失。当我使用默认参数调用该类时,会发生错误 类pan(): def __init__(self,img_shape,batch_size,l1,l2,l3,l4,m): self.img_shape= img_shape self.batch_size=batch_size self.l1=l1 self.l2=l2 self.l3=l3

这是我的代码:我无法理解我的代码的哪一部分出错了(发送3维输入而不是批处理),我在pix2pix代码中加入了感知损失。当我使用默认参数调用该类时,会发生错误

类pan():

def __init__(self,img_shape,batch_size,l1,l2,l3,l4,m):

    self.img_shape= img_shape
    self.batch_size=batch_size
    self.l1=l1
    self.l2=l2
    self.l3=l3
    self.l4=l4
    #self.d_model=d_model
    self.m=m
    def pan_loss_g(self,X_realB,X_fakeB):
        _,p1,p2,p3,p4,_=keras.Model(inputs=X_realB,outputs=[layer.output for layer in self.define_discriminator.layers])
        _,p1f,p2f,p3f,p4f,_= keras.Model(inputs=X_fakeB,outputs=[layer.output for layer in self.define_discriminator.layers]) 
        s1=self.l1*k.mean(k.abs(p1-p1f))
        s2=self.l2*k.mean(k.abs(p2-p2f))
        s3=self.l3*k.mean(k.abs(p3-p3f))
        s4=self.l4*k.mean(k.abs(p4-p4f))
        return s1+s2+s3+s4
    def pan_loss_d(self,X_realB,X_fakeB):
        _, p1,p2,p3,p4,_ = keras.Model(inputs=np.expand_dims(X_realB,0),  outputs=[layer.output for layer in self.define_discriminator.layers])
        _, p1f, p2f, p3f, p4f, _ = keras.Model(inputs=np.expand_dims(X_fakeB,0), outputs=[layer.output for layer in self.define_discriminator.layers])
        s1=self.l1*k.mean(k.abs(p1-p1f))
        s2=self.l2*k.mean(k.abs(p2-p2f))
        s3=self.l3*k.mean(k.abs(p3-p3f))
        s4=self.l4*k.mean(k.abs(p4-p4f))
        return max(self.m-(s1+s2+s3+s4),0)

    #DISCRIMINATOR
    self.discriminator,_ ,_ ,_ ,_=self.define_discriminator()
    self.discriminator.compile(loss=['binary_crossentropy',pan_loss_d],loss_weights=[1,0.5],optimizer=Adam(lr=0.0002, beta_1=0.5))
    #GENERATOR
    self.generator = self.define_generator()
    img = Input(shape=self.img_shape)
    gen_out = self.generator(self.img_shape)
    self.discriminator.trainable = False
    dis_out, _, _, _ ,_ = self.discriminator([img, gen_out])
    self.combined=Model(img, [dis_out, gen_out])
    self.combined.compile(loss=['binary_crossentropy','mae', pan_loss_g], optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[1,100,1])
def define_generator(self):
 #generator architechture
 return model
def define_discriminator(self):
# weight initialization
    init = RandomNormal(stddev=0.02)
    # source image input
    in_src_image = Input(self.img_shape)
    # target image input
    in_target_image = Input(self.img_shape)
    #model architechture
    return model,d2,d3,d4,d5