Python: classifying input text with doc2vec and LogisticRegression
I am trying to use doc2vec in Python to classify user input text into one of two categories. I have the following code to train a model and then classify the input text. The problem is that I cannot find any method to classify a string. I'm new to this, so please ignore the mistakes. Here is a link to the class reference.
In the last step you should first use the infer() method, because you need to create a document vector for the input text; any words that are new to the vocabulary will simply be ignored. Then pass the resulting document vector to the classifier.
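For clarity, the gensim method being referred to is Doc2Vec.infer_vector(); a minimal sketch of that final step, assuming the trained model and fitted classifier built in the code further down (the sample string is hypothetical):

# infer a document vector for new text with the trained Doc2Vec model;
# words that are not in the training vocabulary are simply ignored
new_text = "an example review to classify"            # hypothetical input
doc_vector = model.infer_vector(new_text.split())
# scikit-learn's predict() expects a 2-D array, hence the extra list
print(classifier.predict([doc_vector]))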
I think there is a difference between using Logistic Regression and an SGD classifier; usually, for a dataset of this size, Logistic Regression should be all you need. The best approach is to try the default parameters first and tune once things work.
There is no infer() method in the class, and I'm confused about docvecs. Could you give an example?
Sorry, this part is explained well here: it is a nice tutorial using tweets.
I can recommend a nice tutorial by M Czerny; I think he also uses imdb reviews, like you do?
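As a reference for the comparison above, a hedged sketch of the SGDClassifier alternative, fitted on the same document-vector arrays that the code below builds; the parameter values are illustrative defaults, not taken from the original post:

from sklearn.linear_model import SGDClassifier

# a linear model trained with stochastic gradient descent; with a logistic loss it
# behaves like LogisticRegression but scales better to very large training sets
sgd = SGDClassifier(loss='log_loss', max_iter=1000, tol=1e-3)  # older scikit-learn: loss='log'
sgd.fit(train_arrays, train_labels)
print(sgd.score(test_arrays, test_labels))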
# gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
# random shuffle
from random import shuffle
# numpy
import numpy
# classifier
from sklearn.linear_model import LogisticRegression
import logging
import sys
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class TaggedLineSentence(object):
    def __init__(self, sources):
        self.sources = sources
        flipped = {}
        # make sure that keys are unique
        for key, value in sources.items():
            if value not in flipped:
                flipped[value] = [key]
            else:
                raise Exception('Non-unique prefix encountered')

    def __iter__(self):
        for source, prefix in self.sources.items():
            with utils.smart_open(source) as fin:
                for item_no, line in enumerate(fin):
                    yield TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no])

    def to_array(self):
        self.sentences = []
        for source, prefix in self.sources.items():
            with utils.smart_open(source) as fin:
                for item_no, line in enumerate(fin):
                    self.sentences.append(TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
        return self.sentences

    def sentences_perm(self):
        shuffle(self.sentences)
        return self.sentences
log.info('source load')
sources = {'test-neg.txt':'TEST_NEG', 'test-pos.txt':'TEST_POS', 'train-neg.txt':'TRAIN_NEG', 'train-pos.txt':'TRAIN_POS', 'train-unsup.txt':'TRAIN_UNS'}
log.info('TaggedDocument')
sentences = TaggedLineSentence(sources)
log.info('D2V')
model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
log.info('Epoch')
for epoch in range(10):
    log.info('EPOCH: {}'.format(epoch))
    # gensim >= 1.0 requires total_examples and epochs to be passed explicitly
    model.train(sentences.sentences_perm(), total_examples=model.corpus_count, epochs=1)
log.info('Model Save')
model.save('./imdb.d2v')
model = Doc2Vec.load('./imdb.d2v')
log.info('Sentiment')
train_arrays = numpy.zeros((25000, 100))
train_labels = numpy.zeros(25000)
for i in range(12500):
    prefix_train_pos = 'TRAIN_POS_' + str(i)
    prefix_train_neg = 'TRAIN_NEG_' + str(i)
    train_arrays[i] = model.docvecs[prefix_train_pos]
    train_arrays[12500 + i] = model.docvecs[prefix_train_neg]
    train_labels[i] = 1
    train_labels[12500 + i] = 0
test_arrays = numpy.zeros((25000, 100))
test_labels = numpy.zeros(25000)
for i in range(12500):
    prefix_test_pos = 'TEST_POS_' + str(i)
    prefix_test_neg = 'TEST_NEG_' + str(i)
    test_arrays[i] = model.docvecs[prefix_test_pos]
    test_arrays[12500 + i] = model.docvecs[prefix_test_neg]
    test_labels[i] = 1
    test_labels[12500 + i] = 0
log.info('Fitting')
classifier = LogisticRegression()
classifier.fit(train_arrays, train_labels)
# the fitted estimator's repr (has no effect as a statement, so kept as a comment):
# LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
#                    intercept_scaling=1, penalty='l2', random_state=None, tol=0.0001)
print(classifier.score(test_arrays, test_labels))
# classify input text: infer a document vector for the new text with the trained
# Doc2Vec model, then pass it to the classifier (predict expects a 2-D array)
text = input("Enter Your text:")
text_vector = model.infer_vector(text.split())
print(classifier.predict([text_vector]))
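One practical note, not from the original thread: the snippet above follows the older gensim API. On gensim 4.x a few names differ; roughly, the equivalents look like this sketch:

# gensim 4.x renames: 'size' -> 'vector_size' on the constructor, 'model.docvecs' -> 'model.dv'
model = Doc2Vec(min_count=1, window=10, vector_size=100, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
model.train(sentences.to_array(), total_examples=model.corpus_count, epochs=10)
train_vector = model.dv['TRAIN_POS_0']   # look up a trained document vector by its tag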