Reusing function weights in Keras

I have a function called block:

from tensorflow.keras import layers
from tensorflow.keras.initializers import glorot_uniform

def block(X, classes):
    X = layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), name='a', kernel_initializer=glorot_uniform())(X)
    X = layers.Activation('relu')(X)

    X = layers.Flatten()(X)
    X = layers.Dense(classes, activation='linear', name='fc', kernel_initializer=glorot_uniform())(X)
    return X
This is a neural-network block, and I want to reuse its weights for several inputs. For example, say I have two inputs, input1 and input2. How do I pass them into block so that the same weights are applied to input2? The following code does not work, because it initializes a new set of weights for B:

input1 = layers.Input((64, 64, 3))
input2 = layers.Input((64, 64, 3))

A = block(input1, 10)
B = block(input2, 10)

print(A)
print(B)

It might help if you make block a class that inherits from tf.keras.Model, or change it so that it returns a Sequential model containing all the layers you need. At the moment you do not seem to be saving the weights anywhere.

For example:

A = tf.keras.Sequential([
    # ... your layers
])

B = tf.keras.Sequential([
    # ... your layers
])


# Or use:

def block(classes):
    # the same layers as in the original function
    model = tf.keras.Sequential([
        layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), kernel_initializer=glorot_uniform()),
        layers.Activation('relu'),
        layers.Flatten(),
        layers.Dense(classes, activation='linear', kernel_initializer=glorot_uniform()),
    ])
    return model

A = block(10)
B = block(10)
# note: A and B still hold independent weights; copy them between
# the models as shown below

# Or use

class block(tf.keras.Model):
  def __init__(self, classes):
    super(block, self).__init__()
    # the same layers as in the original function
    self.conv = layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), kernel_initializer=glorot_uniform())
    self.act = layers.Activation('relu')
    self.flatten = layers.Flatten()
    self.fc = layers.Dense(classes, activation='linear', kernel_initializer=glorot_uniform())

  def call(self, inputs):
    X = self.conv(inputs)
    X = self.act(X)
    X = self.flatten(X)
    return self.fc(X)
Then you should be able to get a layer's weights with A.layers[layer_number].get_weights() and set them with B.layers[layer_number].set_weights([np_weight_arr]).
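
As a minimal sketch of that idea (assuming the class-based block above, and using a dummy forward pass just to build the weight variables):

import tensorflow as tf

A = block(10)
B = block(10)

# run a dummy batch through both models so their weight variables exist
dummy = tf.zeros((1, 64, 64, 3))
A(dummy)
B(dummy)

# copy every layer's weights from A into B
# (layers without weights, like Activation and Flatten, just copy an empty list)
for layer_a, layer_b in zip(A.layers, B.layers):
    layer_b.set_weights(layer_a.get_weights())

Note that this copies a snapshot: if A is trained further afterwards, B will not follow along, unlike the shared-model approach described below.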

Also, I know that some individual layers accept a weights argument, though I would have to double-check whether that works for all of the layers you mention. For example, I know you can do something like layers.Embedding(vocab_size + 1, emb_dim, weights=[embedding_matrix], input_length=maxlen), where embedding_matrix is a NumPy array.
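
A minimal sketch of that pattern (vocab_size, emb_dim, maxlen and the random matrix are placeholder values here):

import numpy as np
from tensorflow.keras import layers

vocab_size, emb_dim, maxlen = 1000, 50, 20                   # placeholder sizes
embedding_matrix = np.random.rand(vocab_size + 1, emb_dim)   # stands in for pretrained vectors

emb = layers.Embedding(vocab_size + 1, emb_dim,
                       weights=[embedding_matrix],
                       input_length=maxlen)

The given matrix is simply used as the layer's initial weights when it is built.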


Finally, if you can reuse the model itself, that would be the simplest solution.

One option for reuse is a shared model: we can build a standalone model out of block() and then call it as many times as we like.

import tensorflow as tf

from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras.initializers import glorot_uniform

# this is my block
def block(X, classes):
    X = layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), kernel_initializer=glorot_uniform())(X)
    X = layers.Activation('relu')(X)

    X = layers.Flatten()(X)
    X = layers.Dense(classes, activation='linear', kernel_initializer=glorot_uniform())(X)
    return X


# block model

ip_base = layers.Input((64, 64, 3))  # fix the block's input shape here
op = block(ip_base, 3)
block_model = models.Model(ip_base, op)

block_model_weights = block_model.get_weights()  # we can save this as a pickle and load it later into another model
print(block_model_weights)

# now we can save/reuse the weights of this model anytime we want
# secondary

input1 = layers.Input((64, 64, 3))
input2 = layers.Input((64, 64, 3))

A = block_model(input1)
B = block_model(input2)

model = models.Model([input1, input2], [A, B])

tf.keras.utils.plot_model(model)
Output: the plotted graph shows a single block_model node receiving both input1 and input2.

As you can see, if we create a Model object from the base block and then pass any input through it, the weights are shared.
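
One quick way to verify the sharing (a sketch against the model built above): since both branches route through the same variables, the combined model owns exactly one copy of the block's weights:

# block_model is called twice, but the combined model still holds
# only one copy of its variables
print(len(block_model.trainable_weights))  # 4: conv kernel/bias, dense kernel/bias
print(len(model.trainable_weights))        # also 4 - nothing was duplicated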

If we want to load a pre-trained model and reuse its previous weights on a new input, we can simply do the following:

import numpy as np

# save the weights of the block model
# (dtype=object, since the per-layer arrays have different shapes)
weights = np.array(block_model.get_weights(), dtype=object)

np.save('block_weights.npy', weights, allow_pickle=True)

# now we create another, new block model with the same architecture

ip_base = layers.Input((64, 64, 3))  # same input shape as before
op = block(ip_base, 3)
new_block_model = models.Model(ip_base, op)

# currently it is randomly initialized, but we can take the previous
# weights and load them into the current model

# load the saved weights

old_weights = np.load('block_weights.npy', allow_pickle=True)

new_block_model.set_weights(old_weights)

# compare all of the weights

print(weights)
print('---------------')
print(old_weights)
print('---------------')
print(new_block_model.get_weights())
Output:

[array([[[[ 0.38135457, -0.28602505,  0.6248524 ],
         [-0.10373783,  0.20868587, -0.0295043 ],
         [ 0.073596  , -0.85106397,  0.86780167]]]], dtype=float32)
 array([0., 0., 0.], dtype=float32)
 array([[-0.00512073,  0.00298601,  0.015619  ],
       [ 0.01500274,  0.01736909, -0.0106204 ],
       [ 0.00690563,  0.00548493,  0.00449893],
       ...,
       [-0.00202469, -0.00226198,  0.0212799 ],
       [-0.01203138, -0.00065516, -0.01211848],
       [-0.01238732, -0.00368575,  0.02146613]], dtype=float32)
 array([0., 0., 0.], dtype=float32)]
---------------
[array([[[[ 0.38135457, -0.28602505,  0.6248524 ],
         [-0.10373783,  0.20868587, -0.0295043 ],
         [ 0.073596  , -0.85106397,  0.86780167]]]], dtype=float32)
 array([0., 0., 0.], dtype=float32)
 array([[-0.00512073,  0.00298601,  0.015619  ],
       [ 0.01500274,  0.01736909, -0.0106204 ],
       [ 0.00690563,  0.00548493,  0.00449893],
       ...,
       [-0.00202469, -0.00226198,  0.0212799 ],
       [-0.01203138, -0.00065516, -0.01211848],
       [-0.01238732, -0.00368575,  0.02146613]], dtype=float32)
 array([0., 0., 0.], dtype=float32)]
---------------
[array([[[[ 0.38135457, -0.28602505,  0.6248524 ],
         [-0.10373783,  0.20868587, -0.0295043 ],
         [ 0.073596  , -0.85106397,  0.86780167]]]], dtype=float32), array([0., 0., 0.], dtype=float32), array([[-0.00512073,  0.00298601,  0.015619  ],
       [ 0.01500274,  0.01736909, -0.0106204 ],
       [ 0.00690563,  0.00548493,  0.00449893],
       ...,
       [-0.00202469, -0.00226198,  0.0212799 ],
       [-0.01203138, -0.00065516, -0.01211848],
       [-0.01238732, -0.00368575,  0.02146613]], dtype=float32), array([0., 0., 0.], dtype=float32)]
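
As a side note, instead of pickling NumPy arrays you can also use Keras's built-in checkpointing, which handles the nested weight structure for you (a sketch using the same models as above):

# save and restore with Keras's own weight checkpointing (requires h5py for .h5 files)
block_model.save_weights('block_weights.h5')
new_block_model.load_weights('block_weights.h5')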