Python 在scikit学习中使用标签编码器编码数据时出现类型错误

Python 在scikit学习中使用标签编码器编码数据时出现类型错误,python,pandas,machine-learning,scikit-learn,nlp,Python,Pandas,Machine Learning,Scikit Learn,Nlp,我无法使用scikit学习中的标签编码器对数据进行编码。 dataset.csv有两列文本和标签 我试图将数据集中的文本读入一个列表,将标签读入另一个列表,并将这些列表添加到数据框中,但似乎不起作用 from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm from sklearn.feature_extraction.text import TfidfVectorize

我无法使用scikit学习中的标签编码器对数据进行编码。

dataset.csv
有两列文本和标签 我试图将数据集中的文本读入一个列表,将标签读入另一个列表,并将这些列表添加到数据框中,但似乎不起作用

from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
import pandas, xgboost, numpy, string

data = open('dataset.csv').read()
labels = []
texts = []

for i ,line in enumerate(data.split("\n")):
    content = line.split("\",")
    texts.append(content[0])
    labels.append(content[1:])

trainDF = pandas.DataFrame()
trainDF['text'] = texts
trainDF['label'] = labels

train_x, valid_x, train_y, valid_y = model_selection.train_test_split(trainDF['text'],trainDF['label'],test_size = 0.2,random_state = 0)
encoder = preprocessing.LabelEncoder()
train_y = encoder.fit_transform(train_y)
valid_y = encoder.fit_transform(valid_y)

count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(trainDF['texts'])

xtrain_count =  count_vect.transform(train_x)
xvalid_count =  count_vect.transform(valid_x)

tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
tfidf_vect.fit(trainDF['texts'])
xtrain_tfidf =  tfidf_vect.transform(train_x)
xvalid_tfidf =  tfidf_vect.transform(valid_x)

accuracy = train_model(svm.SVC(), xtrain_tfidf, train_y, xvalid_tfidf)

print(accuracy)
错误:

Traceback (most recent call last):
  File "/home/crackthumb/environments/my_env/lib/python3.6/site-packages/sklearn/preprocessing/label.py", line 105, in _encode
    res = _encode_python(values, uniques, encode)
  File "/home/crackthumb/environments/my_env/lib/python3.6/site-packages/sklearn/preprocessing/label.py", line 59, in _encode_python
    uniques = sorted(set(values))
TypeError: unhashable type: 'list'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "Classifier.py", line 21, in <module>
    train_y = encoder.fit_transform(train_y)
  File "/home/crackthumb/environments/my_env/lib/python3.6/site-packages/sklearn/preprocessing/label.py", line 236, in fit_transform
    self.classes_, y = _encode(y, encode=True)
  File "/home/crackthumb/environments/my_env/lib/python3.6/site-packages/sklearn/preprocessing/label.py", line 107, in _encode
    raise TypeError("argument must be a string or number")
TypeError: argument must be a string or number
回溯(最近一次呼叫最后一次):
文件“/home/crackthumb/environments/my_env/lib/python3.6/site packages/sklearn/preprocessing/label.py”,第105行,in_encode
res=_encode_python(值、唯一性、编码)
python中的文件“/home/crackthumb/environments/my_env/lib/python3.6/site packages/sklearn/preprocessing/label.py”,第59行
uniques=已排序(设置(值))
TypeError:不可损坏的类型:“列表”
在处理上述异常期间,发生了另一个异常:
回溯(最近一次呼叫最后一次):
文件“Classifier.py”,第21行,在
序列y=编码器。拟合变换(序列y)
文件“/home/crackthumb/environments/my_env/lib/python3.6/site packages/sklearn/preprocessing/label.py”,第236行,在fit_transform中
self.classes,y=\u encode(y,encode=True)
文件“/home/crackthumb/environments/my_env/lib/python3.6/site packages/sklearn/preprocessing/label.py”,第107行,in_encode
raise TypeError(“参数必须是字符串或数字”)
TypeError:参数必须是字符串或数字

labels是一个列表列表,而不是字符串列表,这是导致问题的原因。labels是一个列表列表,而不是字符串列表,这是导致问题的原因。
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
import pandas, xgboost, numpy, string
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import SVC

data = open('dataset.csv').read()
labels = []
texts = []

for i ,line in enumerate(data.split("\n")):
    content = line.split("\",")
    texts.append(str(content[0]))
    labels.append(str(content[1:]))

trainDF = pandas.DataFrame()
trainDF['text'] = texts
trainDF['label'] = labels

train_x, valid_x, train_y, valid_y = model_selection.train_test_split(trainDF['text'],trainDF['label'],test_size = 0.2,random_state = 0)
encoder = preprocessing.LabelEncoder()
train_y = encoder.fit_transform(train_y)
valid_y = encoder.fit_transform(valid_y)

from sklearn.pipeline import Pipeline
text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', SVC(kernel='rbf'))])
text_clf.fit(train_x, train_y)

predicted = text_clf.predict(valid_x)

from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

print(confusion_matrix(valid_y,predicted))
print(classification_report(valid_y,predicted))
print(accuracy_score(valid_y,predicted))