
Python: selecting the subset of features that gives the best adjusted R-squared by applying RFE


I have two goals. I want to:

  • loop over feature counts 1-10, and then
  • compare the adjusted R-squared values.

I know how to do this for a single, fixed number of features, as shown in the code below. I have tried looping with

    selector = RFE(regr, n_features_to_select, step=1)

but I think I am missing a key piece of the puzzle. Thanks, everyone!

    from sklearn.feature_selection import RFE
    from sklearn.linear_model import LinearRegression

    regr = LinearRegression()
    # parameters: estimator, n_features_to_select=None, step=1
    selector = RFE(regr, n_features_to_select=5, step=1)
    selector.fit(x_train, y_train)
    selector.support_
    
    def show_best_model(support_array, columns, model):
        y_pred = model.predict(X_test.iloc[:, support_array])
        r2 = r2_score(y_test, y_pred)
        n = len(y_pred)  # size of the test set
        p = len(model.coef_)  # number of features
        adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
        print('Adjusted R-squared: %.2f' % adjusted_r2)
        j = 0
        for i in range(len(support_array)):
            if support_array[i]:
                print(columns[i], model.coef_[j])
                j += 1


    show_best_model(selector.support_, x_train.columns, selector.estimator_)
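
For orientation, a minimal sketch of my own (assuming x_train is a pandas DataFrame, as the question's use of .columns suggests): after fitting, RFE exposes which columns survived through its support_ and ranking_ attributes.

    # support_ is a boolean mask aligned with the training columns;
    # ranking_ is 1 for selected features, larger for features eliminated earlier.
    print(x_train.columns[selector.support_])
    print(selector.ranking_)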
    

You can create a custom GridSearchCV, which performs an exhaustive search over specified parameter values for an estimator.

You can also pick any of the scoring functions available in Scikit-learn. However, adjusted R² is not among them; you can compute it from the R² score with the simple formula adjusted R² = 1 - (1 - R²)(n - 1)/(n - p - 1), and then implement it in the custom GridSearchCV:

    from collections import OrderedDict
    from itertools import product
    from sklearn.feature_selection import RFE
    from sklearn.linear_model import LinearRegression
    from sklearn.datasets import load_iris
    from sklearn.metrics import r2_score
    from sklearn.model_selection import StratifiedKFold
    
    
    def customR2Score(y_true, y_pred, n, p):
        """
        Workaround for the adjusted R^2 score
        :param y_true: Ground Truth during iterations
        :param y_pred: Y predicted during iterations
        :param n: the sample size
        :param p: the total number of explanatory variables in the model
        :return: float, adjusted R^2 score
        """
        r2 = r2_score(y_true, y_pred)
        return 1 - (1 - r2) * (n - 1) / (n - p - 1)
    
    
    def CustomGridSearchCV(X, Y, param_grid, n_splits=10, n_repeats=3):
        """
        Perform GridSearchCV using adjusted R^2 as Scoring.
        Note here we are performing GridSearchCV MANUALLY because adjusted R^2
        cannot be used directly in the GridSearchCV function builtin in Scikit-learn
        :param X: array_like, shape (n_samples, n_features), Samples.
        :param Y: array_like, shape (n_samples, ), Target values.
        :param param_grid: Dictionary with parameters names (string) as keys and lists
                           of parameter settings to try as values, or a list of such
                           dictionaries, in which case the grids spanned by each dictionary
                           in the list are explored. This enables searching over any
                           sequence of parameter settings.
        :param n_splits: Number of folds. Must be at least 2. default=10
        :param n_repeats: Number of times cross-validator needs to be repeated. default=3
        :return: an Ordered Dictionary of the model object and information and best parameters
        """
        best_model = OrderedDict()
        best_model['best_params'] = {}
        best_model['best_train_AdjR2'], best_model['best_cross_AdjR2'] = 0, 0
        best_model['best_model'] = None
    
        allParams = OrderedDict()
        for key, value in param_grid.items():
            allParams[key] = value
    
        for items in product(*allParams.values()):
            params = {}
            i = 0
            for k in allParams.keys():
                params[k] = items[i]
                i += 1
            # at this point, we get different combination of parameters
            model_ = RFE(**params)
            avg_AdjR2_train = 0.
            avg_AdjR2_cross = 0.
            for rep in range(n_repeats):
                skf = StratifiedKFold(n_splits=n_splits, shuffle=True)
                AdjR2_train = 0.
                AdjR2_cross = 0.
                for train_index, cross_index in skf.split(X, Y):
                    x_train, x_cross = X[train_index], X[cross_index]
                    y_train, y_cross = Y[train_index], Y[cross_index]
                    model_.fit(x_train, y_train)
                    # find Adjusted R2 of train and cross
                    y_pred_train = model_.predict(x_train)
                    y_pred_cross = model_.predict(x_cross)
                    AdjR2_train += customR2Score(y_train, y_pred_train, len(y_train), model_.n_features_)
                    AdjR2_cross += customR2Score(y_cross, y_pred_cross, len(y_cross), model_.n_features_)
                AdjR2_train /= n_splits
                AdjR2_cross /= n_splits
                avg_AdjR2_train += AdjR2_train
                avg_AdjR2_cross += AdjR2_cross
            avg_AdjR2_train /= n_repeats
            avg_AdjR2_cross /= n_repeats
        # keep the parameter combination with the best cross-validated adjusted R² so far
            if abs(avg_AdjR2_cross) >= abs(best_model['best_cross_AdjR2']):
                best_model['best_params'] = params
                best_model['best_train_AdjR2'] = avg_AdjR2_train
                best_model['best_cross_AdjR2'] = avg_AdjR2_cross
                best_model['best_model'] = model_
    
        return best_model
    
    
    
    # Dataset for testing
    iris = load_iris()
    X = iris.data
    Y = iris.target
    
    
    regr = LinearRegression()
    
    param_grid = {'estimator': [regr],  # you can try different estimator
                  'n_features_to_select': range(1, X.shape[1] + 1)}
    
    best_model = CustomGridSearchCV(X, Y, param_grid, n_splits=5, n_repeats=2)
    
    print(best_model)
    print(best_model['best_model'].ranking_)
    print(best_model['best_model'].support_)
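
As a possible follow-up (my sketch, not part of the answer's code): the returned dictionary holds the RFE object from the winning configuration, so you could refit it on the full data before reusing it.

    # Hypothetical usage of the result returned by CustomGridSearchCV.
    best_rfe = best_model['best_model']
    best_rfe.fit(X, Y)  # refit the winning configuration on all samples
    print('Chosen n_features_to_select:', best_model['best_params']['n_features_to_select'])
    print('Selected feature mask:', best_rfe.support_)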
    
Test results

Thanks, Yahya, for the reply. I haven't had a chance to test it yet; I'm fairly new to Python, so I will try to learn from your answer.

That said, I did find a solution to my problem. Here it is for future learners:

    def show_best_model(support_array, columns, model):
        y_pred = model.predict(X_test.iloc[:, support_array])
        r2 = r2_score(y_test, y_pred)
        n = len(y_pred)  # size of the test set
        p = len(model.coef_)  # number of features
        adjusted_r2 = 1 - (1 - r2) * (n - 1) / (n - p - 1)
        print('Adjusted R-squared: %.2f' % adjusted_r2)
        j = 0
        for i in range(len(support_array)):
            if support_array[i]:
                print(columns[i], model.coef_[j])
                j += 1

    from sklearn.feature_selection import RFE
    regr = LinearRegression()

    # fit and report a model for every feature count from 1 to 10
    for m in range(1, 11):
        selector = RFE(regr, n_features_to_select=m, step=1)
        selector.fit(x_train, y_train)
        show_best_model(selector.support_, x_train.columns, selector.estimator_)
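
The loop above prints one model per feature count but does not pick a winner for you. A minimal sketch of my own (same train/test split and names as above, with a hypothetical helper adjusted_r2_for) that keeps the best count:

    # Hypothetical helper: compute, rather than print, the adjusted R² of a fitted RFE.
    # Relies on X_test, y_test, regr, and r2_score already defined above.
    def adjusted_r2_for(selector):
        y_pred = selector.estimator_.predict(X_test.iloc[:, selector.support_])
        r2 = r2_score(y_test, y_pred)
        n = len(y_pred)                   # test-set size
        p = int(selector.support_.sum())  # number of selected features
        return 1 - (1 - r2) * (n - 1) / (n - p - 1)

    best_m, best_adj_r2 = None, float('-inf')
    for m in range(1, 11):
        selector = RFE(regr, n_features_to_select=m, step=1)
        selector.fit(x_train, y_train)
        adj = adjusted_r2_for(selector)
        if adj > best_adj_r2:
            best_m, best_adj_r2 = m, adj
    print('Best number of features: %d (adjusted R-squared: %.2f)' % (best_m, best_adj_r2))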
    
    import math
    from numpy import mean
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import mean_absolute_error, mean_squared_error

    X = df.loc[:, ['Age_08_04', 'KM', 'HP', 'Weight', 'Automatic_airco']]
    x_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=.4,
                                                        random_state=20)
    regr = LinearRegression()
    regr.fit(x_train, y_train)
    y_pred = regr.predict(X_test)
    print('Average error: %.2f' % mean(y_test - y_pred))
    print('Mean absolute error: %.2f' % mean_absolute_error(y_test, y_pred))
    print('Mean absolute error: %.2f' % mean(abs(y_test - y_pred)))
    print("Root mean squared error: %.2f"
          % math.sqrt(mean_squared_error(y_test, y_pred)))
    print('Percentage absolute error: %.2f' % mean(abs((y_test - y_pred) / y_test)))
    print('Percentage absolute error: %.2f' % (mean(abs(y_test - y_pred)) / mean(y_test)))
    print('R-squared: %.2f' % r2_score(y_test, y_pred))

    x_train = x_train.loc[:, ['Age_08_04', 'KM', 'HP',
                              'Weight', 'Automatic_airco']]
    X_test = X_test.loc[:, ['Age_08_04', 'KM', 'HP',
                            'Weight', 'Automatic_airco']]
    selector = RFE(regr, n_features_to_select=5, step=1)
    selector.fit(x_train, y_train)
    show_best_model(selector.support_, x_train.columns, selector.estimator_)
    
    Comments:

    I suggest changing one of your tags to scikit-learn; more people with that expertise will see your question.

    The only thing missing is that it doesn't compare the values for you. You have to compare the adjusted R-squared values yourself and then use that number of features.

    Dear @ron, my approach is the standard one; it does compare the values and select the best. As you said, once you are comfortable with Python you will understand my simple solution :) By the way, if you mean comparing and you want to see the metadata, print() is your friend :) @ron, if this answer helped you, please accept it :)