Python: how to get the hidden layer output at every epoch and store it in a list in Keras?

I have a Keras MLP with a single hidden layer that has a given number of nodes. For every epoch, I want to extract the activation values of all neurons in the hidden layer for the samples that pass through, and store them in a list for later exploration. My model definition is as follows:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD


class myNetwork:
    # Architecture of our neural network: one hidden layer, softmax output.
    def multilayerPerceptron(self, Num_Nodes_hidden, input_features, output_dims,
                             activation_function='relu', learning_rate=0.001,
                             momentum_val=0.00):
        model = Sequential()
        model.add(Dense(Num_Nodes_hidden, input_dim=input_features, activation=activation_function))
        model.add(Dense(output_dims, activation='softmax'))

        model.compile(loss="categorical_crossentropy",
                      optimizer=SGD(lr=learning_rate, momentum=momentum_val),
                      metrics=['accuracy'])
        return model
Below is the call from another part of my code, where I use LambdaCallback to save the weights. I want something similar, but this time to save the actual activation values of the hidden layer:

from keras.callbacks import LambdaCallback
import pickle
from keras.callbacks import ModelCheckpoint
from keras.callbacks import CSVLogger



# Setting parameters and calling inputs.
val = myNetwork()
vals = val.multilayerPerceptron(8, 4, 3, 'relu', 0.01)
batch_size_val = 20
number_iters = 200
weights_ih = []
weights_ho = []
activation_vals = []

# csv_logger is used in the fit() call below; the log file name is assumed here.
csv_logger = CSVLogger('training.log')

# This is the part I am unsure about -- what should go inside the lambda?
get_activtaion = LambdaCallback(on_epoch_end=lambda batch, logs: activation_vals.append("What should I put Here"))

print_weights = LambdaCallback(on_epoch_end=lambda batch, logs: weights_ih.append(vals.layers[0].get_weights()))
print_weights_1 = LambdaCallback(on_epoch_end=lambda batch, logs: weights_ho.append(vals.layers[1].get_weights()))

history_callback = vals.fit(X_train, Y_train,
                                 batch_size=batch_size_val,
                                 epochs=number_iters,
                                 verbose=0,
                                 validation_data=(X_test, Y_test),
                                 callbacks = [csv_logger,print_weights,print_weights_1,get_activtaion])

I am quite confused and do not know what I should put inside get_activtaion. Please let me know what I should do there so that, for each of those weight-saving iterations (i.e. each epoch), I get the activation values for all samples in the batch.

A weights_callback to get the weights of each layer:

weights_list = []  # [epoch][layer][unit(l-1)][unit(l)]

def save_weights(model):
    inner_list = []
    for layer in model.layers:
        inner_list.append(layer.get_weights()[0])  # index 0 -> kernel matrix (biases are not saved)
    weights_list.append(inner_list)

weights_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: save_weights(model))
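
After training, weights_list can be read back exactly as the index comment suggests. A small illustration (a sketch, assuming the two-layer model from the question and that at least one epoch has completed):

# Hidden-layer kernel as it stood at the end of the last epoch:
# an array of shape (input_features, Num_Nodes_hidden).
hidden_kernel = weights_list[-1][0]

# Output-layer kernel for the same epoch: shape (Num_Nodes_hidden, output_dims).
output_kernel = weights_list[-1][1]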

An activations_callback to get the output of each layer:

from keras import backend as K

activations_list = []  # [epoch][layer][0][X][unit]

def save_activations(model):
    # Build a backend function per layer mapping the model input to that layer's output,
    # then evaluate each one on X_input_vectors (the data you want activations for).
    outputs = [layer.output for layer in model.layers]
    functors = [K.function([model.input], [out]) for out in outputs]
    layer_activations = [f([X_input_vectors]) for f in functors]
    activations_list.append(layer_activations)

activations_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: save_activations(model))
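
Reading the saved activations back follows the index comment above. For example (a sketch, assuming at least one epoch has been recorded and X_input_vectors held N samples):

# Activations of the first (hidden) layer at the end of the last epoch:
# an array of shape (N, number_of_hidden_units).
hidden_acts = activations_list[-1][0][0]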

Apply the callbacks:

result = model.fit(... , callbacks=[weights_callback, activations_callback], ...)
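
Applied to the code in the question, the same backend-function idea answers what to put inside get_activtaion. The following is only a sketch: it assumes vals is the compiled model returned by multilayerPerceptron, that vals.layers[0] is the hidden Dense layer, and that X_train is the data you want to probe; it also builds the K.function once, outside the callback, rather than on every epoch.

from keras import backend as K
from keras.callbacks import LambdaCallback

# Map the model input to the hidden layer's output.
hidden_output = K.function([vals.input], [vals.layers[0].output])

activation_vals = []  # [epoch][sample][hidden_unit]

# At the end of every epoch, evaluate the hidden layer on X_train and store the result.
get_activtaion = LambdaCallback(
    on_epoch_end=lambda epoch, logs: activation_vals.append(hidden_output([X_train])[0]))

Passing this get_activtaion in the callbacks list of vals.fit(...), exactly as in the question, then fills activation_vals with one (num_samples, 8) array per epoch; building the function once avoids recreating the backend operations at every epoch end.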