
Python: Replicating TensorFlow normalization in Keras


I'm actually trying to replicate a TensorFlow model in Keras, and I'm new to this topic. I want to replicate these lines:

embedding = tf.layers.conv2d(conv6, 128, (16, 16), padding='VALID', name='embedding')
embedding = tf.reshape(embedding, (-1, 128))
embedding = embedding - tf.reduce_min(embedding, keepdims=True)
z_n = embedding / tf.reduce_max(embedding, keepdims=True)
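A note on what these lines do: the 16x16 'VALID' convolution produces a 128-channel embedding (with a 1x1 spatial map, assuming the incoming feature map is 16x16), the reshape flattens it to (batch, 128), and the last two lines min/max-scale the values into [0, 1].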
My current code is:

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation

def conv_conv_pool(n_filters,
                   name,
                   pool=True,
                   activation='relu', padding='same', kernel_size=(3, 3)):
    """{Conv -> BN -> ReLU}x2 -> {strided Conv downsampling, optional}
    Args:
        n_filters (list): number of filters [int, int]
        name (str): name postfix
        pool (bool): if True, append a strided convolution for downsampling
        activation: activation function
    Returns:
        net: Sequential block containing the convolution operations
    """
    net = Sequential(name='block' + name)  # use the name postfix
    for F in n_filters:
        net.add(Conv2D(filters=F, kernel_size=kernel_size, padding=padding))
        net.add(BatchNormalization())
        net.add(Activation(activation))

    if not pool:
        return net

    # downsampling via a strided convolution instead of MaxPool2D
    net.add(Conv2D(filters=F, kernel_size=kernel_size,
                   strides=(2, 2), padding=padding))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    return net


def model_keras():
    model = Sequential()
    model.add(conv_conv_pool(n_filters = [8, 8], name="1"))
    model.add(conv_conv_pool([32, 32], name="2"))
    model.add(conv_conv_pool([32, 32], name="3"))
    model.add(conv_conv_pool([64, 64], name="4"))
    model.add(conv_conv_pool([64, 64], name="5"))
    model.add(conv_conv_pool([128, 128], name="6", pool=False))
    return model
The normalization should be applied after layer 6.


I'd like to use a Lambda layer, right? If so, how should I write it?

I believe you want to switch to TensorFlow 2, which uses Keras as its API. You need to install/upgrade to TensorFlow 2, and then you can try something like the following:

import tensorflow as tf

embedding = tf.keras.layers.Conv2D(128, (16, 16), padding='valid',
                                   name='embedding')(conv6)
embedding = tf.reshape(embedding, (-1, 128))
embedding = embedding - tf.math.reduce_min(embedding, keepdims=True)
z_n = embedding / tf.math.reduce_max(embedding, keepdims=True)
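Since the question asks about a Lambda layer: here is a minimal sketch of how these ops could be appended to the Sequential model built by model_keras() above, assuming the feature map reaching this block is 16x16 so the 'valid' convolution leaves a 1x1 spatial map:

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Flatten, Lambda

def min_max_scale(t):
    # shift by the global min, then divide by the global max -> values in [0, 1]
    t = t - tf.math.reduce_min(t, keepdims=True)
    return t / tf.math.reduce_max(t, keepdims=True)

model = model_keras()  # the Sequential model from the question
model.add(Conv2D(128, (16, 16), padding='valid', name='embedding'))
model.add(Flatten())              # (batch, 128) once the spatial dims are 1x1
model.add(Lambda(min_max_scale))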


If you want to use the Keras layer API, you can create a custom layer (the documentation on writing custom layers explains how); you should end up with something like this:

import tensorflow as tf
from tensorflow.keras import layers

class NormalizationLayer(layers.Layer):

  def __init__(self, filters=128):
    super(NormalizationLayer, self).__init__()
    self.filters = filters
    # create the sub-layer once here rather than on every call
    self.conv = layers.Conv2D(filters, (16, 16), padding='valid',
                              name='embedding')

  def call(self, inputs):
    embedding = self.conv(inputs)
    embedding = tf.reshape(embedding, (-1, self.filters))
    embedding = embedding - tf.math.reduce_min(embedding, keepdims=True)
    z_n = embedding / tf.math.reduce_max(embedding, keepdims=True)
    return z_n
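For completeness, a quick usage sketch (the input shape below is hypothetical, chosen so the 16x16 'valid' convolution leaves a 1x1 spatial map):

inputs = tf.keras.Input(shape=(16, 16, 128))   # hypothetical input shape
z_n = NormalizationLayer(filters=128)(inputs)
model = tf.keras.Model(inputs, z_n)
model.summary()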


I use the normalization you introduced inside a Lambda layer. I also made a correction (min and max are computed on the same input, rather than one on the input and the other on the transformed tensor), but you can change that as well. norm_original normalizes the 4D input with a min and max computed over all channels, and tries to return a 2D output with a fixed number of features. This produces an error, because you are modifying the batch dimension:

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Lambda, Flatten, Dense, GlobalMaxPool2D
from tensorflow.keras.models import Model

def norm_original(inp):
    # flatten everything into the last dimension, then min/max-scale
    embedding = tf.reshape(inp, (-1, inp.shape[-1]))
    embedding = embedding - tf.reduce_min(inp)
    embedding = embedding / tf.reduce_max(inp)
    return embedding

inp = Input((28,28,3))
x = Conv2D(128, 3, padding='same')(inp)
x = Lambda(norm_original)(x)

m = Model(inp, x)
m.compile('adam', 'mse')
m.summary()

X = np.random.uniform(0,1, (10,28,28,3))
y = np.random.uniform(0,1, (10,128))

m.fit(X,y, epochs=3) # error
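The reshape collapses the batch and spatial dimensions together, so the Lambda output has shape (10*28*28, 128) = (7840, 128), which no longer matches the (10, 128) targets; hence the error at fit time.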
To avoid this error, I propose two possibilities. I also changed the normalization to operate per channel (which I find more appropriate), but you can modify that too.

1) You can normalize the 4D input with the min/max and then flatten the output over the last dimensions. This solution doesn't alter the batch dim:

def norm(inp):
    # normalize per channel: min/max computed over batch and spatial dims
    embedding = inp - tf.reduce_min(inp, keepdims=True, axis=[0,1,2])
    embedding = embedding / tf.reduce_max(inp, keepdims=True, axis=[0,1,2])
    return embedding

inp = Input((28,28,3))
x = Conv2D(128, 3, padding='same')(inp)
x = Lambda(norm)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)

m = Model(inp, x)
m.compile('adam', 'mse')

X = np.random.uniform(0,1, (10,28,28,3))
y = np.random.uniform(0,1, (10,128))

m.fit(X,y, epochs=3)
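Here the Lambda output keeps the full 4D shape (batch, 28, 28, 128), and the Flatten + Dense head maps it to the desired (batch, 128) without touching the batch dimension.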
2) You can use a global pooling layer to reduce the 4D tensor to a 2D shape, preserving the feature dimension:

inp = Input((28,28,3))
x = Conv2D(128, 3, padding='same')(inp)
x = Lambda(norm)(x)
x = GlobalMaxPool2D()(x)  # you can also use GlobalAveragePooling2D

m = Model(inp, x)
m.compile('adam', 'mse')

X = np.random.uniform(0,1, (10,28,28,3))
y = np.random.uniform(0,1, (10,128))

m.fit(X,y, epochs=3)
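GlobalMaxPool2D collapses only the spatial dimensions, so the output is (batch, 128), matching the targets while leaving the batch dimension intact.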


Comments:

To better explain my situation: I'm actually writing a Sequential model in Keras (code here): `def model_keras(): model = Sequential(); model.add(conv_conv_pool(n_filters=[8,8], name="1")); model.add(conv_conv_pool([32,32], name="2")); model.add(conv_conv_pool([32,32], name="3")); model.add(conv_conv_pool([64,64], name="4")); model.add(conv_conv_pool([64,64], name="5")); model.add(conv_conv_pool([128,128], name="6", pool=False))`. I want to apply the operation to the layer named "6", but I'm not quite sure how to do it.

Please insert your code into the question above by editing it; if possible, also explain what conv_conv_pool is.

OK thanks, now it's clear... The normalization you cited receives input from a conv layer (4D shape) and then tries to return a 2D shape. Is that correct? Is that what you want, or would it be better to keep the output 4D?

@MarcoCerliani yes, I want to reshape the 4D tensor to 2D.

@MarcoCerliani thank you very much, I'll try it and let you know.