Python 3.x stop word '#' not removed from a DataFrame column

Tags: python-3.x, nltk, tokenize, stop-words

The '#' token still appears in the tokenize column. How can I remove it?

import pandas as pd
import sklearn as sk
import numpy as np
import nltk
from nltk.tokenize import word_tokenize
import os
# nltk.download()
# nltk.download('punkt')
# nltk.download('stopwords')
from nltk.corpus import stopwords

twit_data_test=pd.read_csv(r'C:\Users\yusuf\Dropbox\My PC (LAPTOP-OFS8216E)\Desktop\2021 studies\Data mining assignment\test.csv')

twit_data_train=pd.read_csv(r'C:\Users\yusuf\Dropbox\My PC (LAPTOP-OFS8216E)\Desktop\2021 studies\Data mining assignment\train.csv')

# read_csv already returns a DataFrame, so these copies are redundant but harmless
df_twit_data_test=pd.DataFrame(twit_data_test)
df_twit_data_train=pd.DataFrame(twit_data_train)



print('Number of rows and columns in test data set ',df_twit_data_test.shape)
print('Number of rows and columns in train data set',df_twit_data_train.shape)


print('Number of rows with negative tweets',np.sum(df_twit_data_train['label'] == 1))
print('Number of rows with positive tweets',np.sum(df_twit_data_train['label'] == 0))

n_neg=np.sum(df_twit_data_train['label'] == 1)
n_pos=np.sum(df_twit_data_train['label'] == 0)
a=round(n_neg/(n_neg+n_pos)*100,2)
b=round(n_pos/(n_neg+n_pos)*100,2)

print('The training data set is imbalanced: {0}% of the tweets are negative and {1}% are positive'.format(a,b))
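As an aside, pandas can produce the same class-balance figures in one call; a minimal sketch using value_counts (normalize=True returns proportions rather than counts; the 'label' column name is taken from the code above):

# percentage of each label class, rounded to two decimals
print((df_twit_data_train['label'].value_counts(normalize=True)*100).round(2))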


# Tokenization
df_twit_data_test['tokenize']=df_twit_data_test['tweet'].apply(word_tokenize)
df_twit_data_train['tokenize']=df_twit_data_train['tweet'].apply(word_tokenize)
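Note that word_tokenize splits the leading '#' of a hashtag into a token of its own, which is why '#' appears as a separate entry in the tokenize column. A quick illustration, assuming the punkt data has been downloaded; NLTK's TweetTokenizer is shown for comparison, since it keeps hashtags intact:

from nltk.tokenize import word_tokenize, TweetTokenizer

print(word_tokenize('#love this movie'))              # ['#', 'love', 'this', 'movie']
print(TweetTokenizer().tokenize('#love this movie'))  # ['#love', 'this', 'movie']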


#punctuation removal
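This step was left empty; a minimal sketch of one way to fill it, filtering the token lists against Python's string.punctuation (the column names follow the code above). Note that this alone would already drop the '#' token:

import string

punct=set(string.punctuation)
# keep only tokens that are not a single punctuation character
df_twit_data_test['tokenize']=df_twit_data_test['tokenize'].apply(lambda toks: [t for t in toks if t not in punct])
df_twit_data_train['tokenize']=df_twit_data_train['tokenize'].apply(lambda toks: [t for t in toks if t not in punct])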




#stop words
# stop = stopwords.words('english')
# use a name other than 'stopwords' so the nltk.corpus stopwords module is not shadowed
stop_words=list(stopwords.words('english'))
stop_words.append("#")

# apply returns a new Series, so the result must be assigned back to the column
df_twit_data_test['tokenize']=df_twit_data_test['tokenize'].apply(lambda x: [item for item in x if item not in stop_words])
print(df_twit_data_test)
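For completeness, the original symptom ('#' still present after filtering) is the classic result of calling apply without assigning its return value: pandas builds a new Series and leaves the original column untouched. A quick check that the token is gone once the reassignment above is in place:

# verify that no '#' token survives in any row of the test set
assert not df_twit_data_test['tokenize'].apply(lambda toks: '#' in toks).any()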