Python tensorflow keras:在另一层中使用一个层的变量(序列化错误:TypeError: can't pickle _thread.RLock objects)

我想在后续的层 B 中使用层 A 的变量。这会导致序列化问题,具体取决于作用域。
- 具体来说,我有一个(最小的)一般设置:
  X_in -> normalize(X_in) -> some_layers(X) -> unnormalize(X) -> X_out
- 对于 unnormalize(X) 这一步,我需要使用在 normalize(X) 中得到的平均值(mean)和方差(variance)。
- 为此,我使用了 tensorflow 的 Normalization 层及其变量 mean 和 variance。

当我把所有内容写在全局作用域时,该设置有效:
# ---- Minimal reproduction, everything at module (global) scope ----
# A Normalization layer is adapted to the data and its statistics are
# captured by a Lambda layer that un-normalizes the model output.
dat = np.random.random(size=(100,7))
########## 1: global scope ##########
input = tf.keras.Input(shape=dat.shape[1:])
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)
# The lambda closes over `norm_layer` and reads its mean/variance variables.
output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
model = tf.keras.Model(input,output)
model.compile(optimizer='adam',loss=tf.keras.losses.mse)
# adapt() computes mean and variance from `dat` before training.
norm_layer.adapt(dat)
model.fit(dat,dat,batch_size=10,epochs=3,verbose=2)
# Saving to HDF5 succeeds in this global-scope variant (per the question).
model.save('test_model.h5')
print('\n', '*'*20, 'global scope: model saved', '*'*20)
########## 2: function scope ##########
def function_scope(dat):
    """Build, adapt, fit, and save the same model inside a function.

    Identical to the global-scope version; the de-normalizing Lambda
    closes over the whole `norm_layer` object, and (per the question)
    saving fails with a pickling error in this scope.
    (Indentation restored -- the pasted snippet had lost it.)
    """
    input = tf.keras.Input(shape=dat.shape[1:])
    norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
    x = norm_layer(input)
    # Lambda captures `norm_layer` itself, not plain values.
    output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
    model = tf.keras.Model(input, output)
    model.compile(optimizer='adam', loss=tf.keras.losses.mse)
    norm_layer.adapt(dat)
    model.fit(dat, dat, batch_size=10, epochs=3, verbose=2)
    model.save('test_model.h5')
    print('*'*20, 'function scope: model saved', '*'*20)


function_scope(dat)
# Variant: a named closure reads the statistics instead of the Lambda
# closing over the layer directly.
# (Indentation of the function body restored -- the paste had lost it.)
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)

def unnnorm_closure(xx):
    # Late-bound read of the adapted mean/variance.
    return xx * tf.sqrt(norm_layer.variance) + norm_layer.mean

output = tf.keras.layers.Lambda(lambda xx: unnnorm_closure(xx))(x)
class UnNormalization(tf.keras.layers.Layer):
    """Custom layer that undoes Normalization: x * sqrt(var) + mean.

    `norm_mean` / `norm_var` are the statistics taken from a
    Normalization layer; per the question they are symbolic tensors at
    save time, which still breaks serialization.
    (Indentation restored -- the pasted snippet had lost it.)
    """

    def __init__(self, norm_mean, norm_var, **kwargs):
        super(UnNormalization, self).__init__(**kwargs)
        self.norm_mean = norm_mean
        self.norm_var = norm_var
        # Inner Lambda closes over `self`, carrying the captured stats.
        self.unnormalize = tf.keras.layers.Lambda(
            lambda xx: xx * tf.sqrt(self.norm_var) + self.norm_mean)

    def call(self, inputs):
        return self.unnormalize(inputs)

    def get_config(self):
        # Persist the captured statistics so the layer can be re-created.
        config = super(UnNormalization, self).get_config()
        config.update({'norm_mean': self.norm_mean, 'norm_var': self.norm_var})
        return config
但当上述代码位于函数作用域内时:
# ---- (Duplicated by the page scrape) Global-scope reproduction ----
# Same example as above: Normalization stats captured by a Lambda.
dat = np.random.random(size=(100,7))
########## 1: global scope ##########
input = tf.keras.Input(shape=dat.shape[1:])
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)
# The lambda closes over `norm_layer` and reads its mean/variance variables.
output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
model = tf.keras.Model(input,output)
model.compile(optimizer='adam',loss=tf.keras.losses.mse)
# adapt() computes mean and variance from `dat` before training.
norm_layer.adapt(dat)
model.fit(dat,dat,batch_size=10,epochs=3,verbose=2)
# Saving to HDF5 succeeds in this global-scope variant (per the question).
model.save('test_model.h5')
print('\n', '*'*20, 'global scope: model saved', '*'*20)
########## 2: function scope ##########
def function_scope(dat):
    """Build, adapt, fit, and save the same model inside a function.

    Identical to the global-scope version; the de-normalizing Lambda
    closes over the whole `norm_layer` object, and (per the question)
    saving fails with a pickling error in this scope.
    (Indentation restored -- the pasted snippet had lost it.)
    """
    input = tf.keras.Input(shape=dat.shape[1:])
    norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
    x = norm_layer(input)
    # Lambda captures `norm_layer` itself, not plain values.
    output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
    model = tf.keras.Model(input, output)
    model.compile(optimizer='adam', loss=tf.keras.losses.mse)
    norm_layer.adapt(dat)
    model.fit(dat, dat, batch_size=10, epochs=3, verbose=2)
    model.save('test_model.h5')
    print('*'*20, 'function scope: model saved', '*'*20)


function_scope(dat)
# Variant: a named closure reads the statistics instead of the Lambda
# closing over the layer directly.
# (Indentation of the function body restored -- the paste had lost it.)
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)

def unnnorm_closure(xx):
    # Late-bound read of the adapted mean/variance.
    return xx * tf.sqrt(norm_layer.variance) + norm_layer.mean

output = tf.keras.layers.Lambda(lambda xx: unnnorm_closure(xx))(x)
class UnNormalization(tf.keras.layers.Layer):
    """Custom layer that undoes Normalization: x * sqrt(var) + mean.

    `norm_mean` / `norm_var` are the statistics taken from a
    Normalization layer; per the question they are symbolic tensors at
    save time, which still breaks serialization.
    (Indentation restored -- the pasted snippet had lost it.)
    """

    def __init__(self, norm_mean, norm_var, **kwargs):
        super(UnNormalization, self).__init__(**kwargs)
        self.norm_mean = norm_mean
        self.norm_var = norm_var
        # Inner Lambda closes over `self`, carrying the captured stats.
        self.unnormalize = tf.keras.layers.Lambda(
            lambda xx: xx * tf.sqrt(self.norm_var) + self.norm_mean)

    def call(self, inputs):
        return self.unnormalize(inputs)

    def get_config(self):
        # Persist the captured statistics so the layer can be re-created.
        config = super(UnNormalization, self).get_config()
        config.update({'norm_mean': self.norm_mean, 'norm_var': self.norm_var})
        return config
我得到以下错误:
File "/usr/lib/python3.7/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/lib/python3.7/copy.py", line 169, in deepcopy
rv = reductor(4)
TypeError: can't pickle _thread.RLock objects
我查看过的解决方案中只有一个似乎有效:在函数闭包中捕获 norm_layer.variance 和 norm_layer.mean:
# ---- (Duplicated by the page scrape) Global-scope reproduction ----
# Same example as above: Normalization stats captured by a Lambda.
dat = np.random.random(size=(100,7))
########## 1: global scope ##########
input = tf.keras.Input(shape=dat.shape[1:])
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)
# The lambda closes over `norm_layer` and reads its mean/variance variables.
output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
model = tf.keras.Model(input,output)
model.compile(optimizer='adam',loss=tf.keras.losses.mse)
# adapt() computes mean and variance from `dat` before training.
norm_layer.adapt(dat)
model.fit(dat,dat,batch_size=10,epochs=3,verbose=2)
# Saving to HDF5 succeeds in this global-scope variant (per the question).
model.save('test_model.h5')
print('\n', '*'*20, 'global scope: model saved', '*'*20)
########## 2: function scope ##########
def function_scope(dat):
    """Build, adapt, fit, and save the same model inside a function.

    Identical to the global-scope version; the de-normalizing Lambda
    closes over the whole `norm_layer` object, and (per the question)
    saving fails with a pickling error in this scope.
    (Indentation restored -- the pasted snippet had lost it.)
    """
    input = tf.keras.Input(shape=dat.shape[1:])
    norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
    x = norm_layer(input)
    # Lambda captures `norm_layer` itself, not plain values.
    output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
    model = tf.keras.Model(input, output)
    model.compile(optimizer='adam', loss=tf.keras.losses.mse)
    norm_layer.adapt(dat)
    model.fit(dat, dat, batch_size=10, epochs=3, verbose=2)
    model.save('test_model.h5')
    print('*'*20, 'function scope: model saved', '*'*20)


function_scope(dat)
# Variant: a named closure reads the statistics instead of the Lambda
# closing over the layer directly.
# (Indentation of the function body restored -- the paste had lost it.)
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)

def unnnorm_closure(xx):
    # Late-bound read of the adapted mean/variance.
    return xx * tf.sqrt(norm_layer.variance) + norm_layer.mean

output = tf.keras.layers.Lambda(lambda xx: unnnorm_closure(xx))(x)
class UnNormalization(tf.keras.layers.Layer):
    """Custom layer that undoes Normalization: x * sqrt(var) + mean.

    `norm_mean` / `norm_var` are the statistics taken from a
    Normalization layer; per the question they are symbolic tensors at
    save time, which still breaks serialization.
    (Indentation restored -- the pasted snippet had lost it.)
    """

    def __init__(self, norm_mean, norm_var, **kwargs):
        super(UnNormalization, self).__init__(**kwargs)
        self.norm_mean = norm_mean
        self.norm_var = norm_var
        # Inner Lambda closes over `self`, carrying the captured stats.
        self.unnormalize = tf.keras.layers.Lambda(
            lambda xx: xx * tf.sqrt(self.norm_var) + self.norm_mean)

    def call(self, inputs):
        return self.unnormalize(inputs)

    def get_config(self):
        # Persist the captured statistics so the layer can be re-created.
        config = super(UnNormalization, self).get_config()
        config.update({'norm_mean': self.norm_mean, 'norm_var': self.norm_var})
        return config
用 Lambda 层包装对我不起作用:
# Attempt: route the statistics through an identity Lambda so they are
# consumed as graph tensors; per the question this did not work either.
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)
# Identity Lambda over the (variance, mean) pair.
var, mean = tf.keras.layers.Lambda(lambda xx: (xx[0],xx[1]))((norm_layer.variance, norm_layer.mean))
output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(var) + mean)(x)
自定义层也不起作用:
# ---- (Duplicated by the page scrape) Global-scope reproduction ----
# Same example as above: Normalization stats captured by a Lambda.
dat = np.random.random(size=(100,7))
########## 1: global scope ##########
input = tf.keras.Input(shape=dat.shape[1:])
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)
# The lambda closes over `norm_layer` and reads its mean/variance variables.
output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
model = tf.keras.Model(input,output)
model.compile(optimizer='adam',loss=tf.keras.losses.mse)
# adapt() computes mean and variance from `dat` before training.
norm_layer.adapt(dat)
model.fit(dat,dat,batch_size=10,epochs=3,verbose=2)
# Saving to HDF5 succeeds in this global-scope variant (per the question).
model.save('test_model.h5')
print('\n', '*'*20, 'global scope: model saved', '*'*20)
########## 2: function scope ##########
def function_scope(dat):
    """Build, adapt, fit, and save the same model inside a function.

    Identical to the global-scope version; the de-normalizing Lambda
    closes over the whole `norm_layer` object, and (per the question)
    saving fails with a pickling error in this scope.
    (Indentation restored -- the pasted snippet had lost it.)
    """
    input = tf.keras.Input(shape=dat.shape[1:])
    norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
    x = norm_layer(input)
    # Lambda captures `norm_layer` itself, not plain values.
    output = tf.keras.layers.Lambda(lambda xx: xx * tf.sqrt(norm_layer.variance) + norm_layer.mean)(x)
    model = tf.keras.Model(input, output)
    model.compile(optimizer='adam', loss=tf.keras.losses.mse)
    norm_layer.adapt(dat)
    model.fit(dat, dat, batch_size=10, epochs=3, verbose=2)
    model.save('test_model.h5')
    print('*'*20, 'function scope: model saved', '*'*20)


function_scope(dat)
# Variant: a named closure reads the statistics instead of the Lambda
# closing over the layer directly.
# (Indentation of the function body restored -- the paste had lost it.)
norm_layer = tf.keras.layers.experimental.preprocessing.Normalization()
x = norm_layer(input)

def unnnorm_closure(xx):
    # Late-bound read of the adapted mean/variance.
    return xx * tf.sqrt(norm_layer.variance) + norm_layer.mean

output = tf.keras.layers.Lambda(lambda xx: unnnorm_closure(xx))(x)
class UnNormalization(tf.keras.layers.Layer):
    """Custom layer that undoes Normalization: x * sqrt(var) + mean.

    `norm_mean` / `norm_var` are the statistics taken from a
    Normalization layer; per the question they are symbolic tensors at
    save time, which still breaks serialization.
    (Indentation restored -- the pasted snippet had lost it.)
    """

    def __init__(self, norm_mean, norm_var, **kwargs):
        super(UnNormalization, self).__init__(**kwargs)
        self.norm_mean = norm_mean
        self.norm_var = norm_var
        # Inner Lambda closes over `self`, carrying the captured stats.
        self.unnormalize = tf.keras.layers.Lambda(
            lambda xx: xx * tf.sqrt(self.norm_var) + self.norm_mean)

    def call(self, inputs):
        return self.unnormalize(inputs)

    def get_config(self):
        # Persist the captured statistics so the layer can be re-created.
        config = super(UnNormalization, self).get_config()
        config.update({'norm_mean': self.norm_mean, 'norm_var': self.norm_var})
        return config
还有另一个相关问题,但尚未得到回答。
虽然我设法用闭包解决了这个问题,但我想知道究竟发生了什么。如果我理解正确,主要问题在于:在序列化时,norm_layer.mean 和 norm_layer.variance 是 Keras 符号张量(symbolic tensors)。
我正在使用
Python 3.7.3,
Tensorflow 2.2.0