Python: early stopping with GridSearchCV - using the hold-out CV set for validation


I would like to use the early stopping option with scikit-learn's GridSearchCV. An example of this is shown here:

However, I would like to use the hold-out set of the cross-validation procedure as the validation set. Is there a way to specify this in GridSearchCV?

This is not possible with the current xgboost implementation (referring to versions 0.6 and 0.7). Note the difference between the native xgboost API

    xgboost.train(params, dtrain, num_boost_round=10, evals=(), obj=None, 
feval=None, maximize=False, early_stopping_rounds=None, evals_result=None, 
verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None)

and the sklearn interface

    class xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, 
n_estimators=100, silent=True, objective='reg:linear', booster='gbtree', 
n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0, 
subsample=1, colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, 
reg_lambda=1, scale_pos_weight=1, base_score=0.5, random_state=0, seed=None, 
missing=None, **kwargs)
As you can see, xgboost.XGBRegressor has no such thing as early stopping. Note that the sklearn interface is the only one that can be combined with GridSearchCV, which requires a proper sklearn estimator with .fit(), .predict(), and so on.
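
For illustration, here is a minimal self-made sketch (toy data, not from the original post) of what early stopping looks like in the native API, i.e. the part that the sklearn wrapper of these versions does not expose to GridSearchCV:

import numpy as np
import xgboost as xgb

# Toy regression data standing in for a real train / hold-out split.
rng = np.random.RandomState(0)
X = rng.rand(500, 10)
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=500)
dtrain = xgb.DMatrix(X[:400], label=y[:400])
dval = xgb.DMatrix(X[400:], label=y[400:])

# 'reg:linear' matches the old versions discussed here; newer releases call it 'reg:squarederror'.
booster = xgb.train(
    {'objective': 'reg:linear', 'eta': 0.1, 'max_depth': 3},
    dtrain,
    num_boost_round=1000,
    evals=[(dtrain, 'train'), (dval, 'eval')],
    early_stopping_rounds=20,   # stop once 'eval' has not improved for 20 rounds
    verbose_eval=False,
)
# best_iteration / best_score are set by xgboost when early stopping actually triggers.
print(getattr(booster, 'best_iteration', None), getattr(booster, 'best_score', None))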

You can pass early_stopping_rounds and eval_set as additional fit parameters to GridSearchCV, and that does in fact work. However, GridSearchCV does not change the fit parameters between the different folds, so you would end up using the same eval_set in all folds, which might not be what you mean by CV:

model = xgb.XGBClassifier()
clf = GridSearchCV(model, parameters,
                   fit_params={'early_stopping_rounds': 20,
                               'eval_set': [(X, y)]},
                   cv=kfold)
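
If a single fixed validation set is enough for your purposes, one workable variant is to carve that set off once before the search. The sketch below is my own, with made-up toy data and a tiny grid; note that newer scikit-learn versions expect fit parameters to be passed to GridSearchCV.fit() rather than through the deprecated fit_params constructor argument, and that in recent xgboost releases early_stopping_rounds is set on the estimator constructor instead of fit():

from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
import xgboost as xgb

# Toy data and a tiny grid purely for illustration.
X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
parameters = {'max_depth': [3, 5], 'learning_rate': [0.05, 0.1]}
kfold = KFold(n_splits=3, shuffle=True, random_state=0)

# Split off a fixed early-stopping validation set once; the CV folds are then
# built only from the remaining data.
X_search, X_es, y_search, y_es = train_test_split(X, y, test_size=0.2, random_state=0)

clf = GridSearchCV(xgb.XGBClassifier(n_estimators=500), parameters, cv=kfold)
clf.fit(X_search, y_search,
        early_stopping_rounds=20,
        eval_set=[(X_es, y_es)])   # the same hold-out set is reused for every fold
print(clf.best_params_)
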
After some tinkering, I found that the safest way to integrate early_stopping_rounds with the sklearn API is to implement an early-stopping mechanism yourself. You can do this by running GridSearchCV with n_rounds (n_estimators) as one of the parameters to tune. You can then watch the mean_validation_score of the different models as n_rounds increases and define a custom heuristic for early stopping; you will notice that the default one is, so to speak, not optimal.


I also think this is a better approach than using a single separate hold-out set for this purpose.
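
As a rough illustration of that idea (my own toy example, not the answerer's code), you can put the number of boosting rounds into the grid itself and watch how the mean CV score evolves; note that current scikit-learn exposes this value as cv_results_['mean_test_score'] rather than mean_validation_score:

from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
import xgboost as xgb

X, y = make_classification(n_samples=1000, n_features=20, random_state=0)

param_grid = {
    'n_estimators': [50, 100, 200, 400, 800],   # the "n_rounds" being tuned by hand
    'max_depth': [3, 5],
}
search = GridSearchCV(xgb.XGBClassifier(learning_rate=0.1),
                      param_grid, cv=5, scoring='roc_auc')
search.fit(X, y)

# Inspect how the score changes as n_estimators grows and stop adding rounds once the
# improvement falls below a threshold of your choosing (your "custom heuristic").
for params, mean_score in zip(search.cv_results_['params'],
                              search.cv_results_['mean_test_score']):
    print(params, round(mean_score, 4))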

A while back I built a class wrapping the HyperOpt package to suit my needs.

I'll try to quickly minimize it here so you can use it. Here is the code, with some notes at the end, to help you solve your problem:

import numpy as np
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import xgboost as xgb
max_float_digits = 4


def rounded(val):
    return '{:.{}f}'.format(val, max_float_digits)


class HyperOptTuner(object):
    """
    Tune my parameters!
    """
    def __init__(self, dtrain, dvalid, early_stopping=200, max_evals=200):
        self.counter = 0
        self.dtrain = dtrain
        self.dvalid = dvalid
        self.early_stopping = early_stopping
        self.max_evals = max_evals
        self.tuned_params = None


    def score(self, params):
        self.counter += 1
        # Edit params
        print("Iteration {}/{}".format(self.counter, self.max_evals))
        num_round = int(params['n_estimators'])
        del params['n_estimators']

        watchlist = [(self.dtrain, 'train'), (self.dvalid, 'eval')]
        model = xgb.train(params, self.dtrain, num_round, evals=watchlist, early_stopping_rounds=self.early_stopping,
                          verbose_eval=False)
        n_epoch = model.best_ntree_limit  # number of boosting rounds kept by early stopping
        score = model.best_score
        params['n_estimators'] = n_epoch
        params = dict([(key, rounded(params[key]))
                       if type(params[key]) == float
                       else (key, params[key])
                       for key in params])

        print "Trained with: "
        print params
        print "\tScore {0}\n".format(score)
        return {'loss': 1 - score, 'status': STATUS_OK, 'params': params}

    def optimize(self, trials):
        space = {
            'n_estimators': 2000,  # hp.quniform('n_estimators', 10, 1000, 10),
            'eta': hp.quniform('eta', 0.025, 0.3, 0.025),
            'max_depth': hp.choice('max_depth', np.arange(1, 9, dtype=int)),
            'min_child_weight': hp.choice('min_child_weight', np.arange(1, 10, dtype=int)),
            'subsample': hp.quniform('subsample', 0.3, 1, 0.05),
            'gamma': hp.quniform('gamma', 0.1, 20, 0.1),
            'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.25),
            'eval_metric': 'map',
            'objective': 'rank:pairwise',
            'silent': 1
        }

        fmin(self.score, space, algo=tpe.suggest, trials=trials, max_evals=self.max_evals)

        min_loss = 1
        min_params = {}
        for trial in trials.trials:
            tmp_loss, tmp_params = trial['result']['loss'], trial['result']['params']
            if tmp_loss < min_loss:
                min_loss, min_params = tmp_loss, tmp_params

        print("Winning params:")
        print(min_params)
        print "\tScore: {}".format(1-min_loss)
        self.tuned_params = min_params

    def tune(self):
        print "Tuning...\n"
        # Trials object where the history of search will be stored
        trials = Trials()
        self.optimize(trials)
where max_evals is the size of the "search grid" (a usage example for the tuner appears at the end of this post).

Follow these guidelines, and let me know if you run into any problems.

Comments:

People have been wondering about this for a while; it is odd that we cannot use the CV folds for this purpose. You can achieve it using the hyperopt package.

@EranMoshe Thanks. Do you have an example?

Possibly a genuine duplicate, but he specifically asked for a hyperopt solution. I think it is the better solution and yields better results, so I upvoted it.

Is it also better than randomized grid search?

Yes. It also outperformed Bayesian optimization, but I have only checked this on my own domain, so I suggest testing it on other domains as well and not relying entirely on one. :>
Usage:

# dtrain is a training set of type DMatrix
# dtest is a testing set of type DMatrix
tuner = HyperOptTuner(dtrain=dtrain, dvalid=dtest, early_stopping=200, max_evals=400)
tuner.tune()