Python 将GridSearchCV与keras函数API一起使用
在线上有很多关于如何将GridSearchCV与顺序API一起使用的教程,但关于函数API的教程很少 运行grid.fit时,以下代码失败 问题是,对于输入,应该向Python 将GridSearchCV与keras函数API一起使用,python,tensorflow,keras,Python,Tensorflow,Keras,在线上有很多关于如何将GridSearchCV与顺序API一起使用的教程,但关于函数API的教程很少 运行grid.fit时,以下代码失败 问题是,对于输入,应该向tensorflow.keras.Model输入什么 model = tensorflow.keras.Model(inputs=inputs, outputs=core_out) 通常对于keras项目,您可以传递一个输入层列表(在我的代码中由inputs\u和\u encoded\u features()生成),但我认为您不能将
tensorflow.keras.Model
输入什么
model = tensorflow.keras.Model(inputs=inputs, outputs=core_out)
通常对于keras项目,您可以传递一个输入层列表(在我的代码中由inputs\u和\u encoded\u features()
生成),但我认为您不能将其传递到
问题可能来自于尝试使用tensorflow.keras.layers.experimental.preprocessing
接口来生成传递给模型的编码特征
def build_functional_model(encoded_features, g1dim=10, g2dim=1, c1dim=2500, c2dim=500, c3dim=1, optimizer='ada_grad', learning_rate=0.01, ca='relu' ):
# Attempt 1: Fails with error: ValueError: A `Concatenate` layer should be called on a list of at least 2 inputs
# all_features = tf.keras.layers.concatenate(encoded_features)
# Attempt 2: Fails with error: ValueError: A `Concatenate` layer should be called on a list of at least 2 inputs
inputs = Input(shape=(len(encoded_features.graph.inputs,)))
all_features = tf.keras.layers.concatenate(inputs)
other_norm = Normalization(name='other_norm')(all_features)
core_layer1 = Dense(c1dim, activation=ca, name='core_layer1')(other_norm)
core_layer2 = tf.keras.layers.Dropout(0.2)(core_layer1)
core_layer3 = Dense(c2dim, activation=ca, name='core_layer3')(core_layer2)
core_out = Dense(1, name='core_out')(core_layer3)
model = tensorflow.keras.Model(inputs=inputs, outputs=core_out)
if optimizer == 'ada_grad':
_opt = Adagrad(learning_rate=lr_schedule)
elif optimizer == 'SGD':
_opt = SGD(learning_rate=lr_schedule)
elif optimizer == 'RMSprop':
_opt = RMSprop(learning_rate=lr_schedule)
else: # optimizer == 'adam'
_opt = Adam(learning_rate=lr_schedule)
model.compile(loss='mse', optimizer=_opt, metrics=[MeanSquaredError(), MeanAbsolutePercentageError()]) # CosineSimilarity(), MeanAbsoluteError(),
tf.keras.utils.plot_model(model, 'model.png', show_shapes=False, rankdir='LR')
return model
def grid_search(X, Y, encoded_features):
lrs = [0.3, 0.1, 0.01]
c1dim = [1000, 500, 100]
c2dim = [300, 100, 20]
activations = ['relu', 'tanh', 'softplus' 'elu']
optimizers = ['ada_grad','SGD','RMSprop','adam']
model_grid = KerasRegressor(build_fn=build_functional_model, verbose=0) # Only works if build_fn is not within a class
param_grid = dict(ca=activations,
optimizer=optimizers,
encoded_features=encoded_features,
epochs=[250],
batch_size=[256],
)
grid = GridSearchCV(estimator=model_grid,
param_grid=param_grid,
cv = 3,
verbose=2, n_jobs=1) # n_jobs=1 required as keras currently doesn't support pickle which is needed for multiprocessing
grid_result = grid.fit(X, Y)
class () ...
...
def inputs_and_encoded_features(self):
def add_column(header, layer, dtype=None):
if not dtype:
column = tf.keras.Input(shape=(1,), name=header)
else:
column = tf.keras.Input(shape=(1,), name=header, dtype=dtype)
encoded_col = layer(column)
inputs.append(column)
encoded_features.append(encoded_col) # we might want a seperate gps_features if we want to feed it in seperately
# build feature columns
inputs = []
encoded_features = []
for header in self.gps_columns:
normalization_layer = self.get_normalization_layer(header, self.train_ds)
add_column(header, normalization_layer)
for header in self.numeric_columns:
normalization_layer = self.get_normalization_layer(header, self.train_ds)
add_column(header, normalization_layer)
for header in self.word_columns:
normalization_layer = self.get_normalization_layer(header, self.train_ds)
add_column(header, normalization_layer) # we might want a seperate gps_features if we want to feed it in seperately
for header in self.category_columns:
encoding_layer = self.get_category_encoding_layer(header, self.train_ds, dtype='string',max_tokens=5)
add_column(header, encoding_layer, dtype='string')
self.inputs = inputs
self.encoded_features = encoded_features
return encoded_features