Python-如何使下面的函数线程化?
一般来说,我对python和编程都很陌生——我想知道如何收紧下面的函数并节省一些时间。一些背景资料: 要求是收集给定顶级文件夹中每个子文件/文件夹的名称和ID。问题是,我请求数据的服务器将只返回单个文件夹的内容,而响应将始终指定返回的对象是文件还是文件夹 (伪代码示例,仅尝试快速演示): 然后,我必须迭代每个响应并发回服务器以获得下一个响应集,在某些情况下,整个顶级文件夹可能包含多达15000个文件夹和30000多个随机分布的文件,其中一些文件夹包含1个文件和15个文件夹,另一些文件夹包含7个文件和2个子文件夹等 API本身响应非常快,但是我不知道它可以处理多少并发连接,所以我需要能够在函数中进行调优并找到最佳点,估计它可以处理10-50个。我现在的职能是:Python-如何使下面的函数线程化?,python,multithreading,python-multiprocessing,Python,Multithreading,Python Multiprocessing,一般来说,我对python和编程都很陌生——我想知道如何收紧下面的函数并节省一些时间。一些背景资料: 要求是收集给定顶级文件夹中每个子文件/文件夹的名称和ID。问题是,我请求数据的服务器将只返回单个文件夹的内容,而响应将始终指定返回的对象是文件还是文件夹 (伪代码示例,仅尝试快速演示): 然后,我必须迭代每个响应并发回服务器以获得下一个响应集,在某些情况下,整个顶级文件夹可能包含多达15000个文件夹和30000多个随机分布的文件,其中一些文件夹包含1个文件和15个文件夹,另一些文件夹包含7个文
def drill_folder_loop(folder_list, project_id, directory, jsession_id):
    """Walk a folder tree breadth-first, processing every folder's contents.

    Args:
        folder_list: list of single-entry dicts [{folder_name: folder_id}, ...].
        project_id: forwarded to recipient_process.
        directory: forwarded to the processing helpers.
        jsession_id: session token forwarded to the API helpers.

    Returns:
        The result of log_info("Folder loop complete") once a whole level
        yields no further sub-folders.
    """
    # Copy so the caller's list is not rebound/consumed by the walk.
    pending = list(folder_list)
    while pending:
        next_level = []
        for folder in pending:
            # Each entry is a single {name: id} dict.  next(iter(...)) works on
            # both Python 2 and 3; the original `folder.keys()[0]` /
            # `.values()[0]` only works on Python 2 (dict views are not
            # indexable on Python 3).
            f_name, f_id = next(iter(folder.items()))
            # Posts to the API; returns the folder's contents as a list of
            # {name: id} dicts (same shape as folder_list).
            folder_dict = list_folders_files(f_id, jsession_id)
            # Commits file data to the DB and returns the sub-folder list.
            sub_folders = process_folder_files(folder_dict, directory, jsession_id)
            # Additional required processing; its return value was unused in
            # the original, so it is not bound here.
            recipient_process(folder_dict, "no", f_id, directory, project_id)
            next_level.extend(sub_folders)
        # The loop ends naturally when a level discovers no sub-folders --
        # no `count` flag or manual list-copy loops needed.
        pending = next_level
    return log_info("Folder loop complete")
重读这篇文章是一堂关于变量命名的好课。。。这并不是最简洁的。。代码本身工作得很好,但正如您现在可能想象的那样,翻阅数千个文件夹需要很长时间。。。关于如何将其转变为多线程/处理beast,有什么建议/指导吗?感谢您抽出时间阅读此文章
编辑:
为了清楚起见,我不想在循环中处理文件夹,而是希望在线程中分配它们,使它们具有多个文件夹,从而发布同时发生的请求和响应,以便整个过程花费更少的时间。现在,它一次只在一个文件夹中循环。。希望这能澄清
编辑:
根据Noctis Skytower的建议,我做了一些小改动来支持python 2.7(Queue vs Queue and.clock(),而不是perf_counter())。太近了!我遇到的问题是,当我将正在运行的线程更改为1时,它会完美地完成-当我出于某种原因(随机)将其增加回25时,dfl_worker()中的变量f_id为None。考虑到它可以使用1个线程,我猜这不是建议的问题,而是代码中的其他内容,所以我会将其标记为已接受。谢谢
class ThreadPool:
    # Python 2.7 adaptation (per the question's edit): uses time.clock()
    # instead of perf_counter() and the Py2 Queue module.  `clock`, `Queue`,
    # `Lock`, `Thread` and `Empty` are assumed to be imported at file
    # level -- TODO confirm against the full file.
    #
    # A pool of `count` worker threads consuming (function, args, kwargs)
    # jobs from a shared queue.  A worker shuts itself down once the pool
    # has been idle -- no busy sibling and no queued job -- for `timeout`
    # seconds.
    def __init__(self, count, timeout, daemonic):
        self.__busy = 0            # workers currently executing a job
        self.__idle = clock()      # time of the most recent job completion
        self.__jobs = Queue()      # pending (function, args, kwargs) triples
        self.__lock = Lock()       # guards __busy and __idle
        self.__pool = []           # worker Thread objects
        self.__timeout = timeout   # idle seconds before workers exit
        for _ in range(count):
            thread = Thread(target=self.__worker)
            thread.daemon = daemonic
            thread.start()
            self.__pool.append(thread)

    def __worker(self):
        # Main loop of each worker thread.
        while True:
            try:
                function, args, kwargs = self.__jobs.get(True, 0.1)
            except Empty:
                with self.__lock:
                    if self.__busy:
                        continue  # a busy sibling may enqueue more work
                    if clock() - self.__idle < self.__timeout:
                        continue  # not idle long enough to shut down
                    break
            else:
                with self.__lock:
                    self.__busy += 1
                try:
                    function(*args, **kwargs)
                except:
                    # NOTE(review): bare except silently drops every job
                    # error -- consider logging instead.
                    pass
                with self.__lock:
                    self.__busy -= 1
                    self.__idle = clock()

    def apply(self, function, *args, **kwargs):
        # Enqueue function(*args, **kwargs) after pruning dead workers;
        # raise if no worker remains to pick the job up.
        self.__pool = list(filter(Thread.is_alive, self.__pool))
        if not self.__pool:
            raise RuntimeError('ThreadPool has no running Threads')
        self.__jobs.put((function, args, kwargs))

    def join(self):
        # Block until every worker thread has terminated.
        for thread in self.__pool:
            thread.join()
def drill_folder_loop(folder_list, project_id, directory, jsession_id):
    """Threaded entry point: seed a 25-thread pool (1 s idle timeout,
    non-daemonic) with the top-level folder list and wait for it to drain."""
    pool = ThreadPool(25, 1, False)
    pool.apply(dfl_worker, pool, folder_list, project_id, directory, jsession_id)
    pool.join()
def dfl_worker(tp, folder_list, project_id, directory, jsession_id):
    """ThreadPool job: process one batch of folders and queue their children.

    Args:
        tp: the ThreadPool to which follow-up jobs are submitted.
        folder_list: list of single-entry dicts [{folder_name: folder_id}, ...].
        project_id, directory, jsession_id: forwarded to the helpers.
    """
    for folder in folder_list:
        # next(iter(...)) works on Python 2 and 3; the original
        # `folder.keys()[0]` / `.values()[0]` is Python-2-only and also
        # leaves the dict intact (no destructive popitem()).
        f_name, f_id = next(iter(folder.items()))
        f_dict = list_folders_files(f_id, jsession_id)
        f_list = process_folder_files(f_dict, directory, jsession_id)
        if f_list:  # leaf folders have no children: don't queue a no-op job
            tp.apply(dfl_worker, tp, f_list, project_id, directory, jsession_id)
        recipient_process(f_dict, 'no', f_id, directory, project_id)
    log_info('One folder processed')
（注：此处原为上面 ThreadPool / drill_folder_loop 代码块的机器翻译乱码——下划线、缩进与运算符均被破坏，并在 `if clock() - self.__idle < self.__timeout` 一行处截断。其内容与上方英文代码块完全相同，请以上方代码为准。）
理解此代码的用途有点棘手
据我所知,你想使这段代码多线程;要实现这一点,您需要找到一个可以独立执行的例程/任务集
,然后您可以将其从循环中取出并创建一个独立的函数
def task(someparams):
#mytasks using the required someparams
现在,您可以创建一组工作线程,并为它们分配工作以运行任务
例程并完成工作
以下是如何多线程执行任务
例程:
# Spawn a fixed number of worker threads running `task`, then wait for them.
# The original snippet had three defects: `import thread` is Python-2-only
# (renamed `_thread` in Py3), the counter `c` was never incremented so the
# spawn loop never terminated, and `while 1: pass` busy-spun a CPU core
# instead of waiting for the workers.
import threading

WORKER_THREAD = 10  # number of concurrent workers

workers = []
for _ in range(WORKER_THREAD):
    t = threading.Thread(target=task, args=(someparams,))
    t.start()
    workers.append(t)

for t in workers:
    t.join()  # block until every worker completes
（注：此处原为上述 `thread.start_new_thread` 示例的机器翻译乱码，内容与上方代码相同，请以上方代码为准。）

我可以推荐以下内容吗：
from queue import Empty, Queue
from threading import Lock, Thread
from time import perf_counter
def drill_folder_loop(folder_list, project_id, directory, jsession_id):
    """Sequentially walk the folder tree breadth-first: process one level,
    collect the sub-folders it reveals, and repeat until a level is empty."""
    current = folder_list
    while True:
        discovered = []
        for entry in current:
            # Each entry is a single {name: id} dict; popitem() consumes it.
            name, fid = entry.popitem()
            listing = list_folders_files(fid, jsession_id)
            children = process_folder_files(listing, directory, jsession_id)
            recipient_process(listing, 'no', fid, directory, project_id)
            discovered.extend(children)
        if not discovered:
            break  # no sub-folders found at this level: the walk is done
        current = discovered
    return log_info('Folder loop complete')
###############################################################################
class ThreadPool:
    """Fixed-size pool of threads that execute queued callables.

    Each worker repeatedly pulls a (function, args, kwargs) job off a
    shared queue and runs it, silently discarding any exception it
    raises.  A worker stops once no job has been available for `timeout`
    seconds while no sibling worker is busy.
    """

    def __init__(self, count, timeout, daemonic):
        self.__active = 0                  # workers currently running a job
        self.__last_done = perf_counter()  # when a job last finished
        self.__queue = Queue()             # pending (function, args, kwargs)
        self.__mutex = Lock()              # guards __active / __last_done
        self.__threads = []                # worker Thread objects
        self.__linger = timeout            # idle seconds before workers quit
        for _ in range(count):
            worker = Thread(target=self.__consume)
            worker.daemon = daemonic
            worker.start()
            self.__threads.append(worker)

    def __consume(self):
        """Worker loop: run jobs until the pool has been idle long enough."""
        while True:
            try:
                job, positional, named = self.__queue.get(True, 0.1)
            except Empty:
                with self.__mutex:
                    if self.__active:
                        continue  # a busy sibling may still produce work
                    if perf_counter() - self.__last_done < self.__linger:
                        continue  # not idle long enough to shut down
                    break
            else:
                with self.__mutex:
                    self.__active += 1
                try:
                    job(*positional, **named)
                except:
                    pass  # original behaviour: job errors are dropped
                with self.__mutex:
                    self.__active -= 1
                    self.__last_done = perf_counter()

    def apply(self, function, *args, **kwargs):
        """Queue function(*args, **kwargs); raise if every worker died."""
        self.__threads = list(filter(Thread.is_alive, self.__threads))
        if not self.__threads:
            raise RuntimeError('ThreadPool has no running Threads')
        self.__queue.put((function, args, kwargs))

    def join(self):
        """Block until every worker thread has exited."""
        for worker in self.__threads:
            worker.join()
def drill_folder_loop(folder_list, project_id, directory, jsession_id):
    """Threaded variant: hand the top-level folders to a pool of 25
    workers (1 s idle timeout, non-daemonic) and block until it drains."""
    workers = ThreadPool(25, 1, False)
    workers.apply(dfl_worker, workers, folder_list, project_id, directory, jsession_id)
    workers.join()
def dfl_worker(tp, folder_list, project_id, directory, jsession_id):
    """ThreadPool job: process one batch of folders and queue their children.

    Args:
        tp: the ThreadPool to which follow-up jobs are submitted.
        folder_list: list of single-entry dicts [{folder_name: folder_id}, ...].
        project_id, directory, jsession_id: forwarded to the helpers.
    """
    for folder in folder_list:
        # Read the single {name: id} pair WITHOUT popitem(): popitem()
        # destructively empties the caller's dict, so any entry that is
        # ever seen twice raises KeyError -- which the pool's bare except
        # then swallows silently.  Plausibly related to the intermittent
        # failure reported with 25 threads -- TODO confirm.
        f_name, f_id = next(iter(folder.items()))
        f_dict = list_folders_files(f_id, jsession_id)
        f_list = process_folder_files(f_dict, directory, jsession_id)
        if f_list:  # leaf folders have no children: don't queue a no-op job
            tp.apply(dfl_worker, tp, f_list, project_id, directory, jsession_id)
        recipient_process(f_dict, 'no', f_id, directory, project_id)
    log_info('One folder processed')
（注：此处原为上面推荐代码（顺序版 drill_folder_loop）的机器翻译乱码——标识符被逐词直译、符号丢失，并在中途截断（末行“法罗群岛”即为对标识符的误译）。其内容与上方英文代码块相同，请以上方代码为准。）
from queue import Empty, Queue
from threading import Lock, Thread
from time import perf_counter
def drill_folder_loop(folder_list, project_id, directory, jsession_id):
    # Sequential breadth-first walk of the folder tree, one level per pass.
    # folder_list: list of single-entry dicts [{folder_name: folder_id}, ...].
    while True:
        next_folder_list = []
        for folder in folder_list:
            # NOTE(review): popitem() destructively removes the (name, id)
            # pair from the caller's dict.
            f_name, f_id = folder.popitem()
            f_dict = list_folders_files(f_id, jsession_id)
            f_list = process_folder_files(f_dict, directory, jsession_id)
            recipient_process(f_dict, 'no', f_id, directory, project_id)
            next_folder_list.extend(f_list)
        if not next_folder_list:
            break  # no sub-folders discovered: the walk is complete
        folder_list = next_folder_list
    return log_info('Folder loop complete')
###############################################################################
class ThreadPool:
    # Pool of `count` worker threads consuming (function, args, kwargs)
    # jobs from a shared queue.  A worker exits once no job has arrived
    # for `timeout` seconds while no other worker is busy.
    def __init__(self, count, timeout, daemonic):
        self.__busy = 0                 # workers currently executing a job
        self.__idle = perf_counter()    # time of the most recent job completion
        self.__jobs = Queue()           # pending (function, args, kwargs) triples
        self.__lock = Lock()            # guards __busy and __idle
        self.__pool = []                # worker Thread objects
        self.__timeout = timeout        # idle seconds before workers exit
        for _ in range(count):
            thread = Thread(target=self.__worker)
            thread.daemon = daemonic
            thread.start()
            self.__pool.append(thread)

    def __worker(self):
        # Main loop of each worker thread.
        while True:
            try:
                function, args, kwargs = self.__jobs.get(True, 0.1)
            except Empty:
                with self.__lock:
                    if self.__busy:
                        continue  # a busy sibling may enqueue more work
                    if perf_counter() - self.__idle < self.__timeout:
                        continue  # not idle long enough to shut down
                    break
            else:
                with self.__lock:
                    self.__busy += 1
                try:
                    function(*args, **kwargs)
                except:
                    # NOTE(review): bare except silently drops every job
                    # error -- consider logging instead.
                    pass
                with self.__lock:
                    self.__busy -= 1
                    self.__idle = perf_counter()

    def apply(self, function, *args, **kwargs):
        # Enqueue function(*args, **kwargs) after pruning dead workers;
        # raise if no worker remains to pick the job up.
        self.__pool = list(filter(Thread.is_alive, self.__pool))
        if not self.__pool:
            raise RuntimeError('ThreadPool has no running Threads')
        self.__jobs.put((function, args, kwargs))

    def join(self):
        # Block until every worker thread has terminated.
        for thread in self.__pool:
            thread.join()
def drill_folder_loop(folder_list, project_id, directory, jsession_id):
    # Threaded entry point: 25 workers, 1 s idle timeout, non-daemonic.
    # Seeds the pool with the top-level folder list and waits for it to drain.
    tp = ThreadPool(25, 1, False)
    tp.apply(dfl_worker, tp, folder_list, project_id, directory, jsession_id)
    tp.join()
def dfl_worker(tp, folder_list, project_id, directory, jsession_id):
    # ThreadPool job: for each folder in the batch, list its contents,
    # queue a recursive job for any sub-folders, then run the recipient
    # processing.
    for folder in folder_list:
        # NOTE(review): popitem() destructively empties the {name: id}
        # dict -- reprocessing the same entry would raise KeyError, which
        # the pool's bare except swallows silently.
        f_name, f_id = folder.popitem()
        f_dict = list_folders_files(f_id, jsession_id)
        f_list = process_folder_files(f_dict, directory, jsession_id)
        # A follow-up job is queued even when f_list is empty (no-op job).
        tp.apply(dfl_worker, tp, f_list, project_id, directory, jsession_id)
        recipient_process(f_dict, 'no', f_id, directory, project_id)
    log_info('One folder processed')