Python: building a learning curve for training doc2vec

I am trying to optimize the number of epochs used to train an embedding. Is there a way to generate a learning curve for this process?

For example, I can create a learning curve for ordinary supervised classification:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.linear_model import SGDClassifier

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt

title = "Learning Curves (SGDClassifier)"

cv = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)

estimator = SGDClassifier()
plot_learning_curve(estimator, title, X_all.todense(), y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
And I can train an embedding, for example:

from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.tokenize import word_tokenize


X_tagged = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)]) for i, _d in enumerate(X)]

model = Doc2Vec(size=8, alpha=0.05, min_alpha=0.00025, dm=1)

model.build_vocab(X_tagged)

model.train(X_tagged, total_examples=model.corpus_count, epochs=50)
But how do I create a learning curve while training the embedding?


I don't have enough intuition about training embeddings to work this out.

Typically, a learning curve plots a model's performance (some quantitative score, such as "accuracy") against varying amounts of training data.

Therefore, you need to pick a way to score your Doc2Vec model. (That might be done by using the doc-vectors as the input to another classifier, or by some other method.) Then, you would re-create the Doc2Vec model with a range of different training-set sizes, score each model, and feed the (corpus size, score) data points into the plot.
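
For example, one concrete way to produce those points is to retrain Doc2Vec on growing slices of the corpus and score each model by how well a classifier does on its doc-vectors. A rough sketch, assuming X_tagged and y from the question (with y[i] the label of document i), the question's gensim-3.x-style parameters, and 3-fold cross-validation as an arbitrary scoring choice:

import numpy as np
import matplotlib.pyplot as plt
from gensim.models.doc2vec import Doc2Vec
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score

corpus_sizes, scores = [], []
for frac in np.linspace(0.1, 1.0, 5):
    n = int(frac * len(X_tagged))
    subset = X_tagged[:n]

    # Re-create and train a fresh Doc2Vec model on this slice of the corpus
    model = Doc2Vec(size=8, alpha=0.05, min_alpha=0.00025, dm=1)
    model.build_vocab(subset)
    model.train(subset, total_examples=model.corpus_count, epochs=50)

    # Score the embedding indirectly: accuracy of a classifier on the doc-vectors
    vectors = [model.infer_vector(doc.words) for doc in subset]
    scores.append(cross_val_score(SGDClassifier(), vectors, y[:n], cv=3).mean())
    corpus_sizes.append(n)

plt.plot(corpus_sizes, scores, 'o-')
plt.xlabel("Corpus size (documents)")
plt.ylabel("Mean CV accuracy")
plt.show()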

Note that gensim includes a wrapper class, D2VTransformer, for plugging a Doc2Vec training step into a scikit-learn pipeline. So you could replace the single estimator in your existing code with a multi-step pipeline that includes a D2VTransformer, and then build the learning-curve plot in a way very similar to your existing code.
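
A sketch of that pipeline approach, assuming gensim 3.x (D2VTransformer lives in gensim.sklearn_api and was removed in gensim 4.0), that X is a list of raw text strings with labels y, and reusing the plot_learning_curve function and cv splitter defined above; the tokenization step and parameter values are only illustrative:

from gensim.sklearn_api import D2VTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from nltk.tokenize import word_tokenize

# D2VTransformer expects pre-tokenized documents (lists of words), not TaggedDocuments
X_tokens = [word_tokenize(doc.lower()) for doc in X]

# Each learning-curve fit trains Doc2Vec on its slice of the corpus, then
# fits the classifier on the resulting doc-vectors
pipeline = Pipeline([
    ("d2v", D2VTransformer(dm=1, size=8, iter=50, min_count=1)),
    ("clf", SGDClassifier()),
])

plot_learning_curve(pipeline, "Learning Curves (D2VTransformer + SGDClassifier)",
                    X_tokens, y, cv=cv, n_jobs=1)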

Separate from your main question: your training loop is a mess. It actually makes 250 passes over the data (5 model.iter epochs per call to train(), called 50 times). On the first call to train(), the model's internal alpha descends properly from the starting value of 0.05 to the ending value of 0.00025. But on the other 49 calls it takes values between 0.05 and 0.04 (0.05 - 50*0.0002). You should instead call train() exactly once, with the desired number of epochs, and let the model itself manage alpha correctly. Don't trust other examples that decrement alpha by hand.
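
The pattern being criticized, and the recommended fix, look roughly like this (reconstructed from the description above for illustration; variable names follow the question's code):

# The anti-pattern described above: 50 separate train() calls, each making
# model.iter (default 5) internal passes, with alpha decremented by hand --
# 250 passes total, with alpha drifting down to 0.05 - 50*0.0002 = 0.04.
for epoch in range(50):
    model.train(X_tagged, total_examples=model.corpus_count, epochs=model.iter)
    model.alpha -= 0.0002
    model.min_alpha = model.alpha

# The recommended pattern: one train() call with the full epoch count, letting
# gensim decay alpha internally from alpha=0.05 down to min_alpha=0.00025.
model = Doc2Vec(size=8, alpha=0.05, min_alpha=0.00025, dm=1)
model.build_vocab(X_tagged)
model.train(X_tagged, total_examples=model.corpus_count, epochs=50)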
Thanks @gojomo. Updated.