Python3 asyncio and MotorClient: how to use Motor with multithreading and multiple event loops
Tags: python, multithreading, python-asyncio, tornado-motor, pymongo-3.x

I'm back with a question about asyncio. I find it very useful (especially given the GIL with threads) and I'm trying to improve the performance of some code. My application does the following:
- A background daemon thread "A" receives events from connected clients and reacts by populating a SetQueue (simply an event queue that removes duplicate ids) and by doing some inserts in a DB. I get this daemon from another module (basically, I control a callback invoked when an event is received). In the sample code below I replaced it with a thread I spawn myself, which simply fills the queue with 20 items, mimicking the DB inserts, and then exits.
- A background daemon thread "B" is started (loop_start); it just runs a loop around a coroutine that:
- gets all the items in the queue (if it is empty, it releases control for x seconds and the coroutine is then restarted);
- for every id in the queue, launches a chained coroutine that:
- creates and awaits a task that just fetches all the relevant information for that id from the DB. I'm using MotorClient, which supports asyncio, so the await happens directly inside the task itself;
- uses a ProcessPoolExecutor to launch, per id, a process that uses the DB data for some CPU-intensive processing.
- The main thread just initializes the db_client and issues the loop_start and stop commands.
My current problem is how to use motor.motor_asyncio.AsyncIOMotorClient() in this setup: it is initialized in the main thread (in __init__), while the coroutines that actually access the DB run in threads "A" and "B", each with its own event loop. Here is the sample code:
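(Not from the original post: a minimal sketch of the pattern that causes the trouble, with hypothetical db/collection names. Motor binds a client to the event loop of the thread that creates it, so one defensive workaround is one client per thread/loop rather than a single client shared across loops:)

import asyncio
import threading
from motor.motor_asyncio import AsyncIOMotorClient

def per_thread_db_work():
    # Each thread gets its own loop AND its own client, so Motor's
    # internal callbacks always run on the loop they were created for.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    client = AsyncIOMotorClient()  # bound to this thread's loop
    loop.run_until_complete(client.test_db.events.insert_one({'id': 1}))
    loop.close()

threading.Thread(target=per_thread_db_work, daemon=True).start()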
import threading
import asyncio
import concurrent.futures
import functools
import os
import time
import logging
from random import randint
from queue import Queue
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('{}.log'.format(__name__))
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(processName)s - %(threadName)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
class SetQueue(Queue):
"""Queue that avoids duplicate entries while keeping an order."""
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = set()
def _put(self, item):
if type(item) is not int:
raise TypeError
self.queue.add(item)
def _get(self):
        # Non-standard behaviour: a single get() drains ALL queued items at once
        # (Queue's internal mutex is held while _get runs, so this is thread-safe)
ret = self.queue.copy()
self.queue.clear()
return ret
class Asynchro:
def __init__(self, event_queue):
self.__daemon = None
self.__daemon_terminate = False
self.__queue = event_queue
def fake_populate(self, size):
t = threading.Thread(target=self.worker, args=(size,))
t.daemon = True
t.start()
def worker(self, size):
run = True
populate_event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(populate_event_loop)
cors = [self.worker_cor(i, populate_event_loop) for i in range(size)]
done, pending = populate_event_loop.run_until_complete(asyncio.wait(cors))
        logger.debug('Finished populating event queue with result done={}, pending={}.'.format(done, pending))
while run:
# Keep it alive to simulate something still alive (minor traffic)
time.sleep(5)
rand = randint(100, 200)
populate_event_loop.run_until_complete(self.worker_cor(rand, populate_event_loop))
if self.__daemon_terminate:
logger.debug('Closed the populate_event_loop.')
populate_event_loop.close()
run = False
async def worker_cor(self, i, loop):
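        # NOTE: time.sleep() blocks this thread's event loop for the full 0.5 s;
        # asyncio.sleep() would yield control to other coroutines instead.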
time.sleep(0.5)
self.__queue.put(i)
logger.debug('Wrote {} in the event queue that has now size {}.'.format(i, self.__queue.qsize()))
# Launch fake DB Insertions
#db_task = loop.create_task(self.fake_db_insert(i))
db_data = await self.fake_db_insert(i)
        logger.info('Finished populating with id {}'.format(i))
return db_data
@staticmethod
async def fake_db_insert(item):
# Fake some DB insert
logger.debug('Starting fake db insertion with id {}'.format(item))
st = randint(1, 101) / 100
await asyncio.sleep(st)
logger.debug('Finished db insertion with id {}, sleep {}'.format(item, st))
return item
def loop_start(self):
logger.info('Starting the loop.')
if self.__daemon is not None:
raise Exception
self.__daemon_terminate = False
self.__daemon = threading.Thread(target=self.__daemon_main)
self.__daemon.daemon = True
self.__daemon.start()
def loop_stop(self):
logger.info('Stopping the loop.')
if self.__daemon is None:
raise Exception
self.__daemon_terminate = True
if threading.current_thread() != self.__daemon:
self.__daemon.join()
self.__daemon = None
logger.debug('Stopped the loop and closed the event_loop.')
def __daemon_main(self):
logger.info('Background daemon started (inside __daemon_main).')
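        # Thread "B" is not the main thread, so it must explicitly create and
        # register its own event loop (get_event_loop() only auto-creates one
        # in the main thread).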
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
run, rc = True, 0
while run:
            logger.info('Inside "while run".')
event_loop.run_until_complete(self.__cor_main())
if self.__daemon_terminate:
event_loop.close()
run = False
rc = 1
return rc
async def __cor_main(self):
# If nothing in the queue release control for a bit
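        # (A blocking queue.get() would stall the whole event loop thread,
        # hence this poll-and-sleep pattern.)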
if self.__queue.qsize() == 0:
logger.info('Event queue is empty, going to sleep (inside __cor_main).')
await asyncio.sleep(10)
return
# Extract all items from event queue
items = self.__queue.get()
# Run asynchronously DB extraction and processing on the ids (using pool of processes)
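        # Note: a fresh pool of worker processes is created (and torn down)
        # for every batch of ids taken from the queue.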
with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
cors = [self.__cor_process(item, executor) for item in items]
logger.debug('Launching {} coroutines to elaborate queue items (inside __cor_main).'.format(len(items)))
done, pending = await asyncio.wait(cors)
            logger.debug('Finished executing __cor_main with result {}, pending {}'
                         .format([t.result() for t in done], pending))
async def __cor_process(self, item, executor):
# Extract corresponding DB data
event_loop = asyncio.get_event_loop()
db_task = event_loop.create_task(self.fake_db_access(item))
db_data = await db_task
# Heavy processing of data done in different processes
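        # run_in_executor returns a future; awaiting it keeps this loop free
        # to serve other coroutines while the worker process crunches the data.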
logger.debug('Launching processes to elaborate db_data.')
res = await event_loop.run_in_executor(executor, functools.partial(self.fake_processing, db_data, None))
return res
@staticmethod
async def fake_db_access(item):
# Fake some db access
logger.debug('Starting fake db access with id {}'.format(item))
st = randint(1, 301) / 100
await asyncio.sleep(st)
logger.debug('Finished db access with id {}, sleep {}'.format(item, st))
return item
@staticmethod
def fake_processing(db_data, _):
# fake some CPU processing
logger.debug('Starting fake processing with data {}'.format(db_data))
st = randint(1, 101) / 10
time.sleep(st)
logger.debug('Finished fake processing with data {}, sleep {}, process id {}'.format(db_data, st, os.getpid()))
return db_data
def main():
# Event queue
queue = SetQueue()
return Asynchro(event_queue=queue)
if __name__ == '__main__':
a = main()
a.fake_populate(20)
time.sleep(5)
a.loop_start()
time.sleep(20)
a.loop_stop()
What is the reason for running multiple event loops? I suggest using a single loop, in the main thread only; that is asyncio's native mode.
asyncio may run a loop in a non-main thread in very rare cases, but yours doesn't look like one of them.

Thread "A" is spawned by the mqtt module (in the real code) and needs to be able to call coroutines (e.g. worker_cor), which requires an event loop in that thread. Meanwhile, thread "B" is a background daemon that checks the event queue and needs to dispatch coroutines whenever the queue is not empty: another loop! And in the real code the main thread needs to launch some DB coroutines as well: a third loop! How can I use a single event loop across these threads? Could you show your solution by modifying the sample code (with a single loop, Motor would indeed be no problem at all)? I tried to reuse the same loop via call_soon_threadsafe, but I ran into several issues, including: "Exception in callback None() handle: Traceback (most recent call last): File "/usr/lib/python3.5/asyncio/events.py", line 125, in _run self." I'm pretty worn out at this point.

Sorry, I still don't understand why you need the extra loops. If the code executed in the loop is blocking, don't run it there; call run_in_executor instead. If your library creates event loops internally, that is a very bad smell. Keep everything in the main loop and create asyncio Tasks for the long-running jobs. That's it.
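(Not part of the original exchange: a minimal sketch, with illustrative names only, of the single-loop design suggested above. The main thread owns the only event loop; a side thread plays the role of thread "A" and submits coroutines to that loop with asyncio.run_coroutine_threadsafe, while the CPU-heavy step goes through run_in_executor:)

import asyncio
import concurrent.futures
import threading
import time

def cpu_work(item):
    # Stand-in for the CPU-intensive processing (runs in a worker process).
    time.sleep(0.1)
    return item

async def handle(item, loop, executor):
    # Stand-in for the Motor DB access; with a single loop, one
    # AsyncIOMotorClient created in the main thread could be awaited here.
    await asyncio.sleep(0.05)
    return await loop.run_in_executor(executor, cpu_work, item)

def event_source(loop, executor):
    # Plays the role of thread "A": it owns no loop of its own and simply
    # schedules coroutines onto the main loop from another thread.
    futures = [asyncio.run_coroutine_threadsafe(handle(i, loop, executor), loop)
               for i in range(20)]
    for f in futures:
        print('processed', f.result())  # blocks this thread only, not the loop
    loop.call_soon_threadsafe(loop.stop)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()  # the single loop, owned by the main thread
    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
        threading.Thread(target=event_source, args=(loop, executor), daemon=True).start()
        loop.run_forever()

With this layout Motor never sees more than one event loop, so a single client initialized in the main thread can be awaited from every coroutine.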