TypeError when finding the dominant topic of each sentence in Gensim (Jupyter notebook)


I am doing topic modeling with gensim (in a Jupyter notebook). I successfully built a model and visualized it. The code is as follows:

import time
start_time = time.time()
import re
import spacy
import nltk
import pyLDAvis
import pyLDAvis.gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# nlp = spacy.load('en')
stop_word_list = nltk.corpus.stopwords.words('english')
stop_word_list.extend(['from', 'subject', 're', 'edu', 'use'])
df = pd.read_csv('Topic_modeling.csv')
data = df.Articles.values.tolist()

# Remove emails
data = [re.sub(r'\S*@\S*\s?', '', sent) for sent in data]

# Remove newline characters
data = [re.sub(r'\s+', ' ', sent) for sent in data]

# Remove distracting single quotes
data = [re.sub(r"\'", "", sent) for sent in data]


def sent_to_words(sentences):
    for sentence in sentences:
        yield gensim.utils.simple_preprocess(str(sentence), deacc=True)  # deacc=True removes punctuation


data_words = list(sent_to_words(data))

# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)  # higher threshold, fewer phrases
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)

# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)

# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
    return [[word for word in simple_preprocess(str(doc)) if word not in stop_word_list] for doc in texts]

def make_bigrams(texts):
    return [bigram_mod[doc] for doc in texts]

def make_trigrams(texts):
    return [trigram_mod[bigram_mod[doc]] for doc in texts]

def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """https://spacy.io/api/annotation"""
    texts_out = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
    return texts_out


# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)

# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)

# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en', disable=['parser', 'ner'])

# Do lemmatization, keeping only nouns and adjectives
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN','ADJ'])

# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)

# Create Corpus
texts = data_lemmatized

# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]


# Build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                           id2word=id2word,
                                           num_topics= 3,
                                           random_state=100,
                                           update_every=1,
                                           chunksize=100,
                                           passes=20,
                                           alpha=0.4,
                                           eta=0.2,
                                           per_word_topics=True)

print(lda_model.print_topics())
doc_lda = lda_model[corpus]
Now I want to find the dominant topic in each sentence, so I use the following code:

def format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data):
    # Init output
    sent_topics_df = pd.DataFrame()

    # Get main topic in each document
    for i, row in enumerate(ldamodel[corpus]):
        row = sorted(row, key=lambda x: (x[1]), reverse=True)
        # Get the Dominant topic, Perc Contribution and Keywords for each document
        for j, (topic_num, prop_topic) in enumerate(row):
            if j == 0:  # => dominant topic
                wp = ldamodel.show_topic(topic_num)
                topic_keywords = ", ".join([word for word, prop in wp])
                sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
            else:
                break
    sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']

    # Add original text to the end of the output
    contents = pd.Series(texts)
    sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
    return(sent_topics_df)


df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data)

# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']

# Show
df_dominant_topic.head(10)
However, I get the following error:

TypeError                                 Traceback (most recent call last)
in
     22
     23
---> 24 df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data)
     25
     26 # Format

in format_topics_sentences(ldamodel, corpus, texts)
      5     # Get main topic in each document
      6     for i, row in enumerate(ldamodel[corpus]):
----> 7         row = sorted(row, key=lambda x: (x[1]), reverse=True)
      8         # Get the Dominant topic, Perc Contribution and Keywords for each document
      9         for j, (topic_num, prop_topic) in enumerate(row):

TypeError:

Change the following line:

row = sorted(row, key=lambda x: (x[1]), reverse=True)

so that it first selects the first element of the tuple. Because the model was created with per_word_topics=True, each row returned by ldamodel[corpus] is a tuple whose first element is the list of (topic_number, probability) pairs; row[0] picks out that list, and the tuples in it are then sorted by their second element:

row = sorted(row[0], key=lambda x: (x[1]), reverse=True)
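
For context, here is a minimal sketch of why row[0] is needed, reusing the lda_model and corpus built above (the probability values in the comments are made up for illustration):

# Because the model was created with per_word_topics=True, indexing it with a
# single bag-of-words document returns a 3-tuple, not just the topic distribution.
row = lda_model[corpus[0]]
doc_topics, word_topics, word_phis = row

# doc_topics is the list of (topic_id, probability) pairs we actually want,
# e.g. [(0, 0.62), (1, 0.25), (2, 0.13)]
dominant = sorted(doc_topics, key=lambda x: (x[1]), reverse=True)[0]
print(dominant)  # (topic_id, probability) of the dominant topic

Alternatively, lda_model.get_document_topics(corpus[0]) returns only the (topic_id, probability) list, since its per_word_topics argument defaults to False.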