Python: how to put KerasClassifier, Hyperopt and Sklearn cross-validation together


I am running a hyperparameter tuning (hyperopt) task on a Keras model with sklearn. I am trying to optimize a KerasClassifier using Sklearn cross-validation; some of the code follows:

def create_model():
    model = Sequential()
    model.add(
        Dense(output_dim=params['units1'],
              input_dim=features_.shape[1],
              kernel_initializer="glorot_uniform"))
    model.add(Activation(params['activation']))
    model.add(Dropout(params['dropout1']))
    model.add(BatchNormalization())
    ...
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
Now what I am trying to do is pass the Hyperopt parameters to the KerasClassifier with:

def objective(params, n_folds=N_FOLDS):
    """Objective function for Hyperparameter Optimization"""

    # Keep track of evals
    global ITERATION

    ITERATION += 1

    clf = KerasClassifier(build_fn=create_model,**params)

    start = timer()

    # Perform n_folds cross validation
    cv_results = cross_val_score(clf,
                                 features_,
                                 labels,
                                 cv=5
                                 ).mean()

    run_time = timer() - start

    # Loss must be minimized
    loss = -cv_results

    # Dictionary with information for evaluation
    return {
        'loss': loss,
        'params': params,
        'iteration': ITERATION,
        'train_time': run_time,
        'status': STATUS_OK
    }
I defined the search space as:

space = {'units1': hp.choice('units1', [64, 128, 256, 512]),
         'units2': hp.choice('units2', [64, 128, 256, 512]),
         'dropout1': hp.choice('dropout1', [0.25, 0.5, 0.75]),
         'dropout2': hp.choice('dropout2', [0.25, 0.5, 0.75]),
         'batch_size': hp.choice('batch_size', [10, 20, 40, 60, 80, 100]),
         'nb_epochs': hp.choice('nb_epochs', [10, 50, 100]),
         'optimizer': opt_search_space,
         'activation': 'relu'}
And run the optimization with:

best = fmin(fn = objective, space = space, algo = tpe.suggest, 
            max_evals = MAX_EVALS, trials = bayes_trials, rstate = np.random.RandomState(50))
But it does not work and gives this error:

ValueError: activation is not a legal parameter


What is the correct way to do this?

Make the hyperparameters input arguments of the create_model function. Then you can feed in the params dict. Also, change the key nb_epochs to epochs in your search space; see the KerasClassifier documentation for the other valid parameters.
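A quick way to check which keys the wrapper will accept (a sketch, not from the original answer): roughly, the legal parameters are the build function's own arguments plus the arguments of Sequential's fit/predict/evaluate methods, which you can inspect directly:

import inspect
import tensorflow as tf

# fit() accepts batch_size and epochs (hence 'epochs', not 'nb_epochs'),
# but nothing called 'activation' or 'units1'; those only become legal
# once they are arguments of the function passed as build_fn.
print(inspect.signature(tf.keras.Sequential.fit))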

Try the simplified example below.

import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from tensorflow.keras import Sequential
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, Dropout

import time

def timer():
    # Crude timer: returns only the seconds field of the current local
    # time, so run_time is just a rough indication.
    now = time.localtime(time.time())
    return now[5]


X, y = make_classification(n_samples=1000, n_classes=2,
                           n_informative=4, weights=[0.7, 0.3],
                           random_state=0)
Define the keras model:

def create_model(units1, activation, dropout):
    model = Sequential()
    model.add(Dense(units1,
                    input_dim=X.shape[1],
                    kernel_initializer="glorot_uniform",
                    activation=activation))
    model.add(Dropout(dropout))
    model.add(Dense(1,activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
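
As a quick sanity check (not part of the original answer; the values below are arbitrary), you can call create_model directly before wrapping it in KerasClassifier:

# Arbitrary example values, just to confirm the model builds and compiles.
model = create_model(units1=64, activation='relu', dropout=0.5)
model.summary()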

def objective(params, n_folds=2):
    """Objective function for Hyperparameter Optimization"""

    # Keep track of evals
    global ITERATION

    ITERATION += 1

    clf = KerasClassifier(build_fn=create_model,**params)

    start = timer()

    # Perform n_folds cross validation
    cv_results = cross_val_score(clf, X, y,
                                 cv=5).mean()

    run_time = timer() - start

    # Loss must be minimized
    loss = -cv_results

    # Dictionary with information for evaluation
    return {
        'loss': loss,
        'params': params,
        'iteration': ITERATION,
        'train_time': run_time,
        'status': STATUS_OK
    }

from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

space = {'units1': hp.choice('units1', [12, 64]),
         'dropout': hp.choice('dropout1', [0.25, 0.5]),
         'batch_size': hp.choice('batch_size', [10, 20]),
         'epochs': hp.choice('nb_epochs', [2, 3]),
         'activation': 'relu'
        }

global ITERATION
ITERATION = 0

bayes_trials = Trials()

best = fmin(fn = objective, space = space, algo = tpe.suggest, 
            max_evals = 5, trials = bayes_trials, rstate = np.random.RandomState(50))
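
Note that for hp.choice parameters fmin returns the index of the selected option, not the value itself. To recover the concrete values, hyperopt's space_eval can map the result back onto the search space (a usage sketch, not part of the original answer):

from hyperopt import space_eval

print(best)                     # index-based, e.g. {'units1': 1, 'dropout1': 0, ...}
print(space_eval(space, best))  # concrete values, e.g. {'units1': 64, 'dropout': 0.25, ...}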