Python 3.x multiprocessing.pool.MaybeEncodingError:发送结果时出错
有人能解释一下下面的错误吗 multiprocessing.pool.MaybeEncodingError:发送结果时出错: “[>]”。Python 3.x multiprocessing.pool.MaybeEncodingError:发送结果时出错,python-3.x,flask,multiprocessing,python-multiprocessing,Python 3.x,Flask,Multiprocessing,Python Multiprocessing,有人能解释一下下面的错误吗 multiprocessing.pool.MaybeEncodingError:发送结果时出错: “[>]”。 原因:“NotImplementedError”([E112]不支持对跨度进行酸洗, 因为跨度只是父文档的视图,不能存在于其上 自己的。一个腌制的span必须包括它的Doc和Vocab, 这实际上与酸洗父文档没有任何缺点 直接。因此,与其酸洗跨度,不如酸洗它所属的文档 将Span.as_doc转换为或使用Span.as_doc将Span转换为独立的文档对象。
原因:“NotImplementedError”([E112]不支持对跨度进行酸洗, 因为跨度只是父文档的视图,不能存在于其上 自己的。一个腌制的span必须包括它的Doc和Vocab, 这实际上与酸洗父文档没有任何缺点 直接。因此,与其酸洗跨度,不如酸洗它所属的文档 将Span.as_doc转换为或使用Span.as_doc将Span转换为独立的文档对象。“)” 下面是我的代码: resume\u parser2.py
class Resume_parser2(object):
inputString = ''
skill = ['Java']
def __init__(self, resume):
self.__matcher = Matcher(nlp.vocab)
self.__skills = {
'skills' : None
}
self.__text = utils.extract_text(self.__resume, os.path.splitext(self.__resume)[1])
self.__text = ' '.join(self.__text.split())
self.__nlp = nlp(self.__text)
self.__noun_chunks = list(self.__nlp.noun_chunks)
def __get_basic_details(self):
skills = utils.extract_skills(self.__nlp,self.__noun_chunks)
self.__skills['skills'] = skills
return
def check_skills(self):
skill = ['Java']
if skill in self.__skills:
return self.__skills
def filtered_resume(resume):
parser = Resume_parser2(resume)
return parser.check_skills
if __name__ == '__main__':
pool = mp.Pool(mp.cpu_count())
resumes = []
data = []
for root, directories, filenames in os.walk('resumes'):
for filename in filenames:
file = os.path.join(root, filename)
resumes.append(file)
results = [pool.apply_async(filtered_resume, args=(x,)) for x in resumes]
results = [p.get() for p in results]
from resume_parser2 import Resume_parser2
from flask import Flask, request, redirect, url_for,send_from_directory, jsonify
from werkzeug.utils import secure_filename
import json
import multiprocessing as mp
import pandas as pd
import os
direct = 'E:/parsertool/backupresumes'
app = Flask(__name__)
def print_cyan(text):
print("\033[96m {}\033[00m" .format(text))
def extract_from_directory(directory):
if os.path.exists(directory):
pool = mp.Pool(mp.cpu_count())
resumes = []
for root, directories, filenames in os.walk(directory):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if (extension == '.pdf' or extension == '.docx'):
file = os.path.join(root, filename)
resumes.append(file)
results = pool.map(filtered_resume, resumes)
pool.close()
pool.join()
return results
else:
return 'Directory not found.'
def filtered_resume(resume):
parser = Resume_parser2(resume)
return parser.check_skills
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
#files = request.files.getlist('files')
#folder = dirname(files[0].filename)
#print(folder)
return jsonify(extract_from_directory(direct))
if __name__ == '__main__':
app.run(debug=True)
utils.extract\u text
是从文件中提取文本的函数,而utils.extract\u skills
是从文件中提取技能集的函数。这两者都在提取数据
Api.py
class Resume_parser2(object):
inputString = ''
skill = ['Java']
def __init__(self, resume):
self.__matcher = Matcher(nlp.vocab)
self.__skills = {
'skills' : None
}
self.__text = utils.extract_text(self.__resume, os.path.splitext(self.__resume)[1])
self.__text = ' '.join(self.__text.split())
self.__nlp = nlp(self.__text)
self.__noun_chunks = list(self.__nlp.noun_chunks)
def __get_basic_details(self):
skills = utils.extract_skills(self.__nlp,self.__noun_chunks)
self.__skills['skills'] = skills
return
def check_skills(self):
skill = ['Java']
if skill in self.__skills:
return self.__skills
def filtered_resume(resume):
parser = Resume_parser2(resume)
return parser.check_skills
if __name__ == '__main__':
pool = mp.Pool(mp.cpu_count())
resumes = []
data = []
for root, directories, filenames in os.walk('resumes'):
for filename in filenames:
file = os.path.join(root, filename)
resumes.append(file)
results = [pool.apply_async(filtered_resume, args=(x,)) for x in resumes]
results = [p.get() for p in results]
from resume_parser2 import Resume_parser2
from flask import Flask, request, redirect, url_for,send_from_directory, jsonify
from werkzeug.utils import secure_filename
import json
import multiprocessing as mp
import pandas as pd
import os
direct = 'E:/parsertool/backupresumes'
app = Flask(__name__)
def print_cyan(text):
print("\033[96m {}\033[00m" .format(text))
def extract_from_directory(directory):
if os.path.exists(directory):
pool = mp.Pool(mp.cpu_count())
resumes = []
for root, directories, filenames in os.walk(directory):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if (extension == '.pdf' or extension == '.docx'):
file = os.path.join(root, filename)
resumes.append(file)
results = pool.map(filtered_resume, resumes)
pool.close()
pool.join()
return results
else:
return 'Directory not found.'
def filtered_resume(resume):
parser = Resume_parser2(resume)
return parser.check_skills
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
#files = request.files.getlist('files')
#folder = dirname(files[0].filename)
#print(folder)
return jsonify(extract_from_directory(direct))
if __name__ == '__main__':
app.run(debug=True)
在这里,我无法理解错误。有人能解释一下是什么错误或如何处理吗?代码有点复杂,所以我没有尝试重现,但首先尝试更改:
def filtered_resume(resume):
parser = Resume_parser2(resume)
return parser.check_skills
至(注意第3行末尾添加的括号):
更长的(可能的)解释:
Python会对进程之间传递的任何数据进行pickle(本质上是序列化)。因此,在这种情况下,从filtered\u resume
返回的结果在返回到原始进程之前会被pickle
我猜,如果return parser.check_skills
末尾没有括号,filtered_resume
函数将返回方法check_skills
,而不是调用它并返回结果(self.\u skills
)。因此Python尝试对check\u skills
函数进行pickle处理。pickle方法应该可以,但这一方法需要对整个Resume\u parser2
对象进行pickle(至少因为它包含对self
的显式引用,可能是因为Python会对对象进行pickle,因为check\u skills
是对象的一部分)
酸洗对象,就像方法一样,应该可以。但是,如果nlp
是对空间对象的引用(我猜是这样的),那么您开始遇到pickle空间对象所带来的一些奇怪之处。同样,我只是猜测,但我假设\uu noon\u chunks
是spaCy生成的一组跨度,因此当Resume\u parser2
对象被pickle时,它会尝试pickle\uu noon\u chunks
而spaCy会抱怨,因为它们是跨度
如果您做了我上面建议的更改,它应该只尝试pickle
\u skills
变量,并且假设它不包含跨度,它应该可以pickle。但是,如果这不起作用,则必须更新代码,以消除跨距或将其转换为spaCy文档(如错误消息本身所述)……我们可以在遇到它时跨越这一桥梁。该代码有点复杂,因此我没有尝试复制,但首先尝试更改:
def filtered_resume(resume):
parser = Resume_parser2(resume)
return parser.check_skills
至(注意第3行末尾添加的括号):
更长的(可能的)解释:
Python会对进程之间传递的任何数据进行pickle(本质上是序列化)。因此,在这种情况下,从filtered\u resume
返回的结果在返回到原始进程之前会被pickle
我猜,如果return parser.check_skills
末尾没有括号,filtered_resume
函数将返回方法check_skills
,而不是调用它并返回结果(self.\u skills
)。因此Python尝试对check\u skills
函数进行pickle处理。pickle方法应该可以,但这一方法需要对整个Resume\u parser2
对象进行pickle(至少因为它包含对self
的显式引用,可能是因为Python会对对象进行pickle,因为check\u skills
是对象的一部分)
酸洗对象,就像方法一样,应该可以。但是,如果nlp
是对空间对象的引用(我猜是这样的),那么您开始遇到pickle空间对象所带来的一些奇怪之处。同样,我只是猜测,但我假设\uu noon\u chunks
是spaCy生成的一组跨度,因此当Resume\u parser2
对象被pickle时,它会尝试pickle\uu noon\u chunks
而spaCy会抱怨,因为它们是跨度
如果您做了我上面建议的更改,它应该只尝试pickle\u skills
变量,并且假设它不包含跨度,它应该可以pickle。但是,如果这不起作用,则必须更新代码以消除跨距或将其转换为spaCy文档(如错误消息本身所述)…我们可以在遇到它时跨越这座桥