Python: how to get the L2 regularization loss value in a keras.Model?


I use keras.Model to build my model, but with a custom loss function and a custom training procedure: I wrote the iteration loop and the sess.run calls myself. Now I want to get the L2 weight-regularization loss during the iterations. How can I do that?

The model is built as follows:

def model():
    x = Input(shape=(None, None, 3))
    y = Conv2D(10, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(),
               kernel_regularizer=regularizers.l2(0.0005))(x)
    y = Conv2D(16, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(),
               kernel_regularizer=regularizers.l2(0.0005))(y)
    y = Conv2D(32, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(),
               kernel_regularizer=regularizers.l2(0.0005))(y)
    y = Conv2D(16, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(),
               kernel_regularizer=regularizers.l2(0.0005))(y)
    y = Conv2D(1, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(),
               kernel_regularizer=regularizers.l2(0.0005))(y)
    return Model(inputs=[x], outputs=[y])

def loss(y_true, y_pred):
    return tf.losses.softmax_cross_entropy(...)  # placeholder for the custom loss
The training code:

def train():
    dataset = tf.data.TFRecordDataset(tfrecords).make_one_shot_iterator().get_next()
    input_image = tf.placeholder(...)
    label = tf.placeholder(...)
    net = model()
    pred = net(input_image)
    loss_op = loss(label, pred)
    while True:
        imgs, loss_value = sess.run([dataset, loss_op])

With the above code I don't think the weight-regularization loss is included. How can I get it? I tried using

l2_loss_op = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

but the value is 0.
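
One thing worth checking first (a minimal sketch, assuming TF 1.x graph mode with standalone Keras): Keras layers collect their regularization terms in model.losses rather than adding them to the GraphKeys.REGULARIZATION_LOSSES collection, which would explain why summing that collection gives 0. Summing model.losses inside the session should yield the weight-decay value:

net = model()
# assumption: net.losses holds the per-layer regularization tensors
reg_loss_op = tf.add_n(net.losses)  # sum of all kernel_regularizer terms
...
reg_value = sess.run(reg_loss_op)   # evaluate inside the training loop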

UPDATE: a cleaner and more complete implementation

I wrote a custom function that returns all layers' l1, l2, and l1_l2 losses, including recurrent ones; it does not include activity_regularizer losses, which are not weight losses:

def l1l2_weight_loss(model):
    l1l2_loss = 0
    for layer in model.layers:
        if 'layer' in layer.__dict__ or 'cell' in layer.__dict__:
            l1l2_loss += _l1l2_rnn_loss(layer)
            continue
            
        if 'kernel_regularizer' in layer.__dict__ or \
           'bias_regularizer'   in layer.__dict__:
            l1l2_lambda_k, l1l2_lambda_b = [0,0], [0,0] # defaults
            if layer.__dict__['kernel_regularizer'] is not None:
                l1l2_lambda_k = list(layer.kernel_regularizer.__dict__.values())
            if layer.__dict__['bias_regularizer']   is not None:
                l1l2_lambda_b = list(layer.bias_regularizer.__dict__.values())
                
            if any([(_lambda != 0) for _lambda in (l1l2_lambda_k + l1l2_lambda_b)]):
                W = layer.get_weights()
    
                for idx,_lambda in enumerate(l1l2_lambda_k + l1l2_lambda_b):
                    if _lambda != 0:
                        _pow = 2**(idx % 2) # 1 if idx is even (l1), 2 if odd (l2)
                        l1l2_loss += _lambda*np.sum(np.abs(W[idx//2])**_pow)
    return l1l2_loss
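
The (idx % 2) parity trick above assumes the regularizer stores its l1 factor before its l2 factor; a quick sanity check (my illustration, assuming Keras 2.x, whose L1L2 regularizer sets self.l1 before self.l2, so __dict__ keeps that order):

from keras.regularizers import l2

reg = l2(1e-3)
print(reg.__dict__)
# {'l1': 0.0, 'l2': 0.001} -- l1 first, l2 second,
# which is what the (idx % 2) parity test relies on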

The helper for recurrent layers, which also handles Bidirectional wrappers:

def _l1l2_rnn_loss(layer):
    l1l2_loss = 0
    if 'backward_layer' in layer.__dict__:
        bidirectional = True
        _layer = layer.layer
    else:
        _layer = layer
        bidirectional = False
    ldict = _layer.cell.__dict__
        
    if 'kernel_regularizer'    in ldict or \
       'recurrent_regularizer' in ldict or \
       'bias_regularizer'      in ldict:
        l1l2_lambda_k, l1l2_lambda_r, l1l2_lambda_b = [0,0], [0,0], [0,0]
        if ldict['kernel_regularizer']    is not None:
            l1l2_lambda_k = list(_layer.kernel_regularizer.__dict__.values())
        if ldict['recurrent_regularizer'] is not None:
            l1l2_lambda_r = list(_layer.recurrent_regularizer.__dict__.values())
        if ldict['bias_regularizer']      is not None:
            l1l2_lambda_b = list(_layer.bias_regularizer.__dict__.values())
        
        all_lambda = l1l2_lambda_k + l1l2_lambda_r + l1l2_lambda_b
        if any([(_lambda != 0) for _lambda in all_lambda]):
            W = layer.get_weights()
            idx_incr = len(W)//2 # accounts for 'use_bias'
            
            for idx,_lambda in enumerate(all_lambda):
                if _lambda != 0:
                    _pow = 2**(idx % 2) # 1 if idx is even (l1), 2 if odd (l2)
                    l1l2_loss += _lambda*np.sum(np.abs(W[idx//2])**_pow)
                    if bidirectional:
                        l1l2_loss += _lambda*np.sum(
                                    np.abs(W[idx//2 + idx_incr])**_pow)
    return l1l2_loss

Testing the implementation:

from keras.layers import Input, Dense, LSTM, GRU, Bidirectional
from keras.models import Model
from keras.regularizers import l1, l2, l1_l2
import numpy as np 
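
# Assumed helper (hypothetical; the original answer references a separate
# implementation): plain numpy binary cross-entropy with Keras' default
# epsilon, so the custom total can be compared against model.evaluate().
def binary_crossentropy(y_true, y_pred, eps=1e-7):
    y_pred = np.clip(y_pred, eps, 1 - eps)
    return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))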

ipt   = Input(shape=(1200,16))
x     = LSTM(60, activation='relu', return_sequences=True,
                                                 recurrent_regularizer=l2(1e-3),)(ipt)
x     = Bidirectional(GRU(60, activation='relu', bias_regularizer     =l1(1e-4)))(x)
out   = Dense(1,  activation='sigmoid',          kernel_regularizer   =l1_l2(2e-4))(x)
model = Model(ipt,out)

model.compile(loss='binary_crossentropy', optimizer='adam')
X = np.random.rand(10,1200,16) # (batch_size, timesteps, input_dim)
Y = np.random.randint(0,2,(10,1))
keras_loss   = model.evaluate(X,Y)
custom_loss  = binary_crossentropy(Y, model.predict(X))
custom_loss += l1l2_weight_loss(model)

print('%.6f'%keras_loss  + ' -- keras_loss')
print('%.6f'%custom_loss + ' -- custom_loss')

Output:

0.763822 -- keras_loss
0.763822 -- custom_loss