Converting from a single decision tree (J48) classifier to an ensemble in Python

I want to implement the DECORATE ensemble classification algorithm from the paper. I have a J48 (C4.5) decision tree (code below). I want to run it on the dataset several times (up to I_max trials) and compute C*, the class-membership probabilities averaged over all ensemble members, as described on page 8 of the paper.

Below is my implementation of DECORATE based on the algorithm proposed in the paper. Feel free to improve the solution.

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import datasets

class EnsembleClassifier():

    def __init__(self, base_classifier, labels):
        self.classifiers = [base_classifier]
        self.labels = labels
    def add_classifier(self, classifier):
        self.classifiers.append(classifier)
    def remove_last_classifier(self):
        self.classifiers.pop(-1)
    def predict_proba(self, X):
        # C*: the class-membership probabilities averaged over all members
        return np.array([clf.predict_proba(X) for clf in self.classifiers]).sum(axis=0) / len(self.classifiers)
    def predict(self, X):
        # fixed: use self.labels rather than the global `labels`
        return self.labels[np.argmax(self.predict_proba(X), axis=1)]
    def error(self, X, y):
        # fixed: use self rather than the global `ensembleClasifier`
        return 1 - accuracy_score(y, self.predict(X))
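
As a quick sanity check of the wrapper on its own (my own toy example, not part of the original post), two shallow trees can be averaged directly; predict_proba returns C*, the mean of the members' class-membership probabilities, and predict takes the argmax:

# toy check: average two trees of different depth on the full iris data
X_toy, y_toy = datasets.load_iris(return_X_y=True)
t1 = DecisionTreeClassifier(max_depth=1, random_state=0).fit(X_toy, y_toy)
t2 = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X_toy, y_toy)

ens = EnsembleClassifier(t1, labels=np.unique(y_toy))
ens.add_classifier(t2)
print(ens.predict_proba(X_toy[:2]))  # averaged probabilities, shape (2, 3)
print(ens.predict(X_toy[:2]))        # argmax mapped back through `labels`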

class Artificial_data():

    def __init__(self, X, y, dtypes):
        self.dtypes = {}
        self._generator = {}
        # np.unique sorts the labels, matching the column order of
        # predict_proba (clf.classes_); pd.Series.unique() does not
        self.labels = np.unique(y)
        for c, dtype in zip(X.columns, dtypes):
            self.dtypes[c] = dtype
            if dtype == 'numeric':
                # numeric feature: fit a Gaussian to the training column
                self._generator[c] = {'mean': X[c].mean(), 'std': X[c].std()}
            else:
                # nominal feature: use the empirical value frequencies
                unique_values = X[c].value_counts() / X.shape[0]
                self._generator[c] = {'values': unique_values.index, 'prob': unique_values.values}

    def sample_generator(self, ensemble, nb_samples=1):
        # draw artificial examples feature by feature
        syn_X = pd.DataFrame()
        for c in self.dtypes.keys():
            if self.dtypes[c] == 'numeric':
                syn_X[c] = np.random.normal(self._generator[c]['mean'], self._generator[c]['std'], nb_samples)
            else:
                syn_X[c] = np.random.choice(self._generator[c]['values'], p=self._generator[c]['prob'],
                                            size=nb_samples, replace=True)
        # label each example inversely to the ensemble's predicted probabilities
        p_hat = ensemble.predict_proba(syn_X)
        p_hat[p_hat == 0] = 1e-5    # avoid division by zero
        inverse_p = 1 / p_hat
        new_p = inverse_p / inverse_p.sum(axis=1)[:, np.newaxis]
        syn_y = [np.random.choice(self.labels, p=new_p[i]) for i in range(nb_samples)]
        return syn_X, syn_y
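
To make the relabeling step concrete (my own worked illustration, not from the post): for one artificial example the ensemble's probabilities are inverted and renormalized, so classes the ensemble is confident about become unlikely labels, which is what pushes each new member to disagree with the current ensemble:

p_hat = np.array([[0.7, 0.2, 0.1]])   # ensemble probabilities for 3 classes
inverse_p = 1 / p_hat                 # [[1.43, 5.0, 10.0]]
new_p = inverse_p / inverse_p.sum(axis=1)[:, np.newaxis]
print(new_p)                          # ~[[0.087, 0.304, 0.609]]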


iris = datasets.load_iris()
X, y = iris.data, iris.target
X_train_base, X_test, y_train_base, y_test = train_test_split(pd.DataFrame(X), pd.Series(y),
                                                              test_size=0.3, random_state=100)

# dtypes = ['numeric' for _ in range(7)] + ['nominal']  # use this for the abalone dataset
dtypes = ['numeric' for _ in range(4)]
np.random.seed(1)
artificial_data = Artificial_data(X_train_base, y_train_base, dtypes)

c_size = 15                   # target ensemble size
i_max = 300                   # maximum number of trials
R_size = len(X_train_base)    # artificial examples drawn per iteration
i = 1
trials = 1
labels = np.unique(y_train_base)
clf_entropy = DecisionTreeClassifier(random_state=1, max_depth=2)
clf_entropy.fit(X_train_base, y_train_base)

ensemble = EnsembleClassifier(clf_entropy, labels)
error_bst = ensemble.error(X_train_base, y_train_base)

while i < c_size and trials < i_max:
    # train a candidate tree on the real data plus fresh artificial examples
    X_syn, y_syn = artificial_data.sample_generator(ensemble, R_size)
    X_train = pd.concat([X_train_base, X_syn], axis=0, ignore_index=True)
    y_train = np.append(y_train_base, y_syn, axis=0)

    C_prime = DecisionTreeClassifier(random_state=1, max_depth=2)
    C_prime.fit(X_train, y_train)

    ensemble.add_classifier(C_prime)
    error_i = ensemble.error(X_train_base, y_train_base)

    # keep the candidate only if the ensemble's training error does not grow
    if error_i <= error_bst:
        print('improvement')
        error_bst = error_i
        print(error_i)
        i += 1
    else:
        ensemble.remove_last_classifier()

    trials += 1
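
One thing the loop above never does is touch the held-out split; a final comparison along these lines (my addition, reusing the variable names from the script) shows whether the ensemble actually beats the single tree:

print('single tree test error:', 1 - accuracy_score(y_test, clf_entropy.predict(X_test)))
print('ensemble test error   :', ensemble.error(X_test, y_test))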
@AI_学习 Thanks a lot, but is there a way to make this code work for the dataset below? ()

Just try changing the dtypes: dtypes = ['numeric' for _ in range(7)] + ['nominal']  # use this for the abalone dataset
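
On the abalone question: scikit-learn trees only accept numeric inputs, so the nominal column has to be encoded before fitting, while Artificial_data can still treat it as 'nominal' so synthetic values are drawn from its empirical distribution. A minimal sketch, assuming the UCI abalone.data layout (Sex first, Rings last as the target; adjust the dtypes list to match your column order):

from sklearn.preprocessing import OrdinalEncoder

cols = ['Sex', 'Length', 'Diameter', 'Height', 'WholeWeight',
        'ShuckedWeight', 'VisceraWeight', 'ShellWeight', 'Rings']
abalone = pd.read_csv('abalone.data', header=None, names=cols)

X_ab = abalone.drop(columns='Rings')
y_ab = abalone['Rings']

# encode the nominal 'Sex' column so DecisionTreeClassifier accepts it
X_ab['Sex'] = OrdinalEncoder().fit_transform(X_ab[['Sex']])

# one entry per column of X_ab, in order: 'Sex' is nominal, the rest numeric
dtypes = ['nominal'] + ['numeric' for _ in range(7)]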