Unexpected shifting of subplots drawn by a Python helper function


I am trying to combine two plots into one figure:

In the left plot I want to show the decision boundary, together with the hyperplanes that correspond to the one-vs-all (OVA) classifiers, and in the right plot I want to show the decision probabilities.

This is the code so far:

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn

from sklearn import datasets
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC

def plot_hyperplane(c, color, fitted_model):
    """
    Plot the one-against-all classifiers for the given model.

    Parameters
    --------------

    c : index of the hyperplane to be plotted
    color : color to be used when drawing the line
    fitted_model : the fitted model
    """
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()

    try:
        coef = fitted_model.coef_
        intercept = fitted_model.intercept_
    except:
        return

    def line(x0):
        return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]

    plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color, zorder=3)


def plot_decision_boundary(X, y, fitted_model, features, targets):
    """
    This function plots a model's decision boundary and, if available,
    the decision probabilities.
    Requires a model fitted with two features only.

    Parameters
    --------------

    X : the data to learn
    y : the classification labels
    fitted_model : the fitted model
    """
    cmap = plt.get_cmap('Set3')
    prob = cmap
    colors = [cmap(i) for i in np.linspace(0, 1, len(fitted_model.classes_))]

    plt.figure(figsize=(9.5, 5))
    for i, plot_type in enumerate(['Decision Boundary', 'Decision Probabilities']):
        plt.subplot(1, 2, i+1)

        mesh_step_size = 0.01  # step size in the mesh
        x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
        y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size), np.arange(y_min, y_max, mesh_step_size))
        # First plot, predicted results using the given model
        if i == 0:
            Z = fitted_model.predict(np.c_[xx.ravel(), yy.ravel()])
            for h, color in zip(fitted_model.classes_, colors):
                plot_hyperplane(h, color, fitted_model) 
        # Second plot, predicted probabilities using the given model
        else:
            prob = 'RdYlBu_r'
            try:
                Z = fitted_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
            except:
                plt.text(0.4, 0.5, 'Probabilities Unavailable', horizontalalignment='center', 
                         verticalalignment='center', transform=plt.gca().transAxes, fontsize=12)
                plt.axis('off')
                break
        Z = Z.reshape(xx.shape)
        # Display Z
        plt.imshow(Z, interpolation='nearest', cmap=prob, alpha=0.5, 
                   extent=(x_min, x_max, y_min, y_max), origin='lower', zorder=1)
        # Plot the data points
        for i, color in zip(fitted_model.classes_, colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], facecolor=color, edgecolor='k', lw=1,
                        label=iris.target_names[i], cmap=cmap, alpha=0.8, zorder=2)
        plt.title(plot_type + '\n' + 
                  str(fitted_model).split('(')[0]+ ' Test Accuracy: ' + str(np.round(fitted_model.score(X, y), 5)))
        plt.xlabel(features[0])
        plt.ylabel(features[1])
        plt.gca().set_aspect('equal')   
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))    
    plt.tight_layout()
    plt.subplots_adjust(top=0.9, bottom=0.08, wspace=0.02)
    plt.show()


if __name__ == '__main__': 
    iris = datasets.load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target

    scaler = preprocessing.StandardScaler().fit_transform(X)
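    # Note: the scaled data computed above is never used; the classifiers
    # below are all fitted on the unscaled X.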

    clf1 = DecisionTreeClassifier(max_depth=4)
    clf2 = KNeighborsClassifier(n_neighbors=7)
    clf3 = SVC(kernel='rbf', probability=True)
    clf4 = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)

    clf1.fit(X, y)
    clf2.fit(X, y)
    clf3.fit(X, y)
    clf4.fit(X, y)

    plot_decision_boundary(X, y, clf1, iris.feature_names, iris.target_names[[0, 2]])
    plot_decision_boundary(X, y, clf2, iris.feature_names, iris.target_names[[0, 2]])
    plot_decision_boundary(X, y, clf3, iris.feature_names, iris.target_names[[0, 2]])
    plot_decision_boundary(X, y, clf4, iris.feature_names, iris.target_names[[0, 2]])
The result is:

As can be seen, for the last example in the code above, clf4, I have so far not been able to get the hyperplanes drawn in the correct position, and I do not know how to fix this. They should be transformed to the correct range of the features used to fit the model.


Thank you.
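For what it's worth, the shift seems to come from the order in which the axis limits get set: when plot_hyperplane reads plt.xlim() and plt.ylim(), nothing has been drawn on the freshly created subplot yet, so the limits are still matplotlib's defaults, and they only jump to the data range once imshow(..., extent=...) is added. A minimal standalone sketch of that behaviour (the extent values here are made up for illustration):

import numpy as np
import matplotlib.pyplot as plt

plt.figure()
plt.subplot(1, 2, 1)
# Fresh axes: nothing drawn yet, so the limits are still the defaults.
print(plt.xlim(), plt.ylim())    # -> (0.0, 1.0) (0.0, 1.0)

# An image with an explicit extent rescales the axes to that extent, so any
# line drawn against the old limits ends up in the wrong place afterwards.
plt.imshow(np.zeros((10, 10)), extent=(4.0, 8.0, 1.0, 7.0), origin='lower')
print(plt.xlim(), plt.ylim())    # -> (4.0, 8.0) (1.0, 7.0)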

Apparently the problem is that the endpoints of the dashed lines representing the hyperplanes are computed against xlim and ylim values that do not agree with the final, expected ones. The good news is that x_min, x_max, y_min and y_max are already defined, so you can use them to fix xlim and ylim before the hyperplanes are drawn, by adding the following three lines. Specifically, add them just before the comment line `# First plot, predicted results using the given model`:


        ax = plt.gca()
        ax.set_xlim((x_min, x_max), auto=False)
        ax.set_ylim((y_min, y_max), auto=False)

When I try this I get an error and Python stops working: QWindowsWindow::setGeometry: Unable to set geometry 1000x1069+9+38 on QWidgetWindow/'MainWindowClassWindow'. Resulting geometry: 1000x1055+9+38 (frame: 9, 38, 9, 9, custom margin: 0, 0, 0, minimum size: 72x69, maximum size: 16777215x16777215).

@pceccon It looks like a Qt problem, but I don't see where Qt comes in in your original post. Sorry, I had not taken that into account, and I am not good with PyQt. One option for tracking your problem down is to run the code without Qt, for example in a Jupyter notebook. That will help you determine whether xlim and ylim really are the cause of the wrongly placed hyperplanes. If they are, you can then work out how to set xlim and ylim under Qt. Hope this helps.

I don't import PyQt explicitly. I will try to sort this out. Thank you.

@pceccon You're welcome. Sorry I could not give you a complete solution.
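For reference, a minimal sketch of where those three lines land inside plot_decision_boundary from the question (an excerpt, not a standalone script; everything else stays unchanged). Note that they have to come after x_min, x_max, y_min and y_max are computed, and before the hyperplanes are drawn:

        mesh_step_size = 0.01  # step size in the mesh
        x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
        y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size), np.arange(y_min, y_max, mesh_step_size))

        # Pin the limits to the data range before anything is drawn, so the
        # endpoints computed in plot_hyperplane agree with the extent that
        # imshow will use later.
        ax = plt.gca()
        ax.set_xlim((x_min, x_max), auto=False)
        ax.set_ylim((y_min, y_max), auto=False)

        # First plot, predicted results using the given model
        if i == 0:
            Z = fitted_model.predict(np.c_[xx.ravel(), yy.ravel()])
            for h, color in zip(fitted_model.classes_, colors):
                plot_hyperplane(h, color, fitted_model)

If the QWindowsWindow::setGeometry warning mentioned in the comments gets in the way while testing, one way to take Qt out of the picture is to run the script under a non-GUI backend, for example by calling matplotlib.use('Agg') before importing pyplot and saving the figure with plt.savefig() instead of plt.show(), or to run it in a Jupyter notebook as suggested above.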