Python AIOHTTP - Application.make_handler(…) is deprecated - adding multiprocessing


I've been on a "how much performance can I squeeze out of a Python web server?" journey, which brought me to AIOHTTP and uvloop. Still, I could see that AIOHTTP wasn't making full use of my CPU, so I started looking into multiprocessing with AIOHTTP. I learned that the Linux kernel has a feature (SO_REUSEPORT) that lets multiple processes share the same TCP port, which led me to develop the following code (and it works brilliantly):
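
Roughly, it looked like the sketch below (a reconstruction based on the deprecation warning and the AppRunner attempt further down, so take it as an approximation; mk_socket and handle are the same helpers as in the later listings):

import asyncio
import os
import socket
import time
from aiohttp import web
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count

CPU_COUNT = cpu_count()

def mk_socket(host="127.0.0.1", port=8000, reuseport=False):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if reuseport:
        SO_REUSEPORT = 15  # Linux value; lets several processes bind the same port
        sock.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1)
    sock.bind((host, port))
    return sock

async def handle(request):
    name = request.match_info.get('name', "Anonymous")
    text = "{:.2f}: Hello {}! Process {} is treating you\n".format(
        time.time(), name, os.getpid())
    return web.Response(text=text)

async def start_server(loop):
    app = web.Application()
    app.add_routes([web.get('/', handle),
                    web.get('/{name}', handle)])
    sock = mk_socket(reuseport=True)
    # The deprecated call: make_handler() returns the protocol factory directly.
    return await loop.create_server(
        protocol_factory=app.make_handler(),
        sock=sock,
    )

def init():
    loop = asyncio.get_event_loop()
    loop.run_until_complete(start_server(loop))
    print('Server started at http://127.0.0.1:8000')
    loop.run_forever()

if __name__ == '__main__':
    with ProcessPoolExecutor() as executor:
        for _ in range(CPU_COUNT):
            executor.submit(init)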

The wrk benchmark of my site before applying this code:

Running 30s test @ http://127.0.0.1:8000/
  12 threads and 400 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency    54.33ms    6.54ms 273.24ms   89.95%
    Req/Sec   608.68    115.97     2.27k    83.63%
  218325 requests in 30.10s, 41.23MB read
  Non-2xx or 3xx responses: 218325
Requests/sec:   7254.17
Transfer/sec:      1.37MB

The wrk benchmark after:

Running 30s test @ http://127.0.0.1:8000/
  12 threads and 400 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency    15.96ms    7.27ms  97.29ms   84.78%
    Req/Sec     2.11k   208.30     4.45k    75.50%
  759290 requests in 30.08s, 153.51MB read
Requests/sec:  25242.39
Transfer/sec:      5.10MB
Wow! But there is one problem:

DeprecationWarning: Application.make_handler(...) is deprecated, use AppRunner API instead
  protocol_factory=app.make_handler()
So I tried this:

import asyncio
import os
import socket
import time
from aiohttp import web
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count

CPU_COUNT = cpu_count()
print("CPU Count:", CPU_COUNT)

def mk_socket(host="127.0.0.1", port=8000, reuseport=False):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if reuseport:
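        # 15 is the value of SO_REUSEPORT on Linux; hard-coded here instead of using socket.SO_REUSEPORT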
        SO_REUSEPORT = 15
        sock.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1)
    sock.bind((host, port))
    return sock

async def handle(request):
    name = request.match_info.get('name', "Anonymous")
    pid = os.getpid()
    text = "{:.2f}: Hello {}! Process {} is treating you\n".format(
        time.time(), name, pid)
    #time.sleep(5)  # intentionally blocking sleep to simulate CPU load
    return web.Response(text=text)

async def start_server():
    host = "127.0.0.1"
    port=8000
    reuseport = True
    app = web.Application()
    sock = mk_socket(host, port, reuseport=reuseport)
    app.add_routes([web.get('/', handle),
                    web.get('/{name}', handle)])

    coro = loop.create_server(
            protocol_factory=app.make_handler(),
            sock=sock,
        )
    runner = web.AppRunner(app)
    await runner.setup()
    srv = web.TCPSite(runner, 'localhost', 8000)
    await srv.start()
    print('Server started at http://127.0.0.1:8000')
    return coro, app, runner

async def finalize(srv, app, runner):
    sock = srv.sockets[0]
    app.loop.remove_reader(sock.fileno())
    sock.close()

    #await handler.finish_connections(1.0)
    await runner.cleanup()
    srv.close()
    await srv.wait_closed()
    await app.finish()

def init():
    loop = asyncio.get_event_loop()
    srv, app, runner = loop.run_until_complete(start_server())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        loop.run_until_complete((finalize(srv, app, runner)))


if __name__ == '__main__':
    with ProcessPoolExecutor() as executor:
        for i in range(0, CPU_COUNT):
            executor.submit(init)

This is obviously incomplete, since coro is never used. I'm not sure where to integrate the socket with the AppRunner. An answer should show the original example modified to use AppRunner.

Since this is the first time I'm playing with coroutines and aiohttp I may be wrong, but it seems to work with a SockSite:

#!/usr/bin/env python

import asyncio
import os
import socket
import time
import traceback
from aiohttp import web
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count

CPU_COUNT = cpu_count()
print("CPU Count:", CPU_COUNT)

def mk_socket(host="127.0.0.1", port=9090, reuseport=False):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if reuseport:
        SO_REUSEPORT = 15
        sock.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1)
    sock.bind((host, port))
    return sock

async def handle(request):
    name = request.match_info.get('name', "Anonymous")
    pid = os.getpid()
    text = "{:.2f}: Hello {}! Process {} is treating you\n".format(
        time.time(), name, pid)
    #time.sleep(5)  # intentionally blocking sleep to simulate CPU load
    return web.Response(text=text)

async def start_server():
    try:
        host = "127.0.0.1"
        port=9090
        reuseport = True
        app = web.Application()
        app.add_routes([web.get('/', handle),
                        web.get('/{name}', handle)])
        runner = web.AppRunner(app)
        await runner.setup()
        sock = mk_socket(host, port, reuseport=reuseport)
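        # SockSite serves the app on a pre-created socket, so SO_REUSEPORT can be set before binding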
        srv = web.SockSite(runner, sock)
        await srv.start()
        print('Server started at http://127.0.0.1:9090')
        return srv, app, runner
    except Exception:
        traceback.print_exc()
        raise

async def finalize(srv, app, runner):
    # SockSite.stop() closes the listening socket; runner.cleanup() shuts the
    # application down and runs its on_shutdown / on_cleanup signals.
    await srv.stop()
    await runner.cleanup()

def init():
    loop = asyncio.get_event_loop()
    srv, app, runner = loop.run_until_complete(start_server())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        loop.run_until_complete((finalize(srv, app, runner)))


if __name__ == '__main__':
    with ProcessPoolExecutor() as executor:
        for i in range(0, CPU_COUNT):
            executor.submit(init)
And finally:

>curl http://127.0.0.1:9090
1580741746.47: Hello Anonymous! Process 54623 is treating you
>curl http://127.0.0.1:9090
1580741747.05: Hello Anonymous! Process 54620 is treating you
>curl http://127.0.0.1:9090
1580741747.77: Hello Anonymous! Process 54619 is treating you
>curl http://127.0.0.1:9090
1580741748.36: Hello Anonymous! Process 54621 is treating you
I also added a log statement to the finalize routine, and it seems to be triggered correctly.


Edit: out of curiosity I gave it a try on an older kernel, and I confirm that it does not work when the option is enabled (it works fine with False).
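
A more defensive mk_socket could probe the socket module instead of hard-coding 15 (a sketch; note that hasattr is only a heuristic, since an old kernel can still reject the option at setsockopt() time):

import socket

def mk_socket(host="127.0.0.1", port=9090, reuseport=False):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if reuseport:
        # Prefer the constant exposed by the socket module over a hard-coded value.
        if not hasattr(socket, "SO_REUSEPORT"):
            raise RuntimeError("SO_REUSEPORT is not available on this platform")
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    sock.bind((host, port))
    return sock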

I wonder: why not use Gunicorn?

There is a performance penalty I don't want to pay, as well as extra complexity I don't want to deal with. Also because I don't use nginx; I have a different solution sitting in front of aiohttp. Thanks.

This seems mostly workable and acceptable. However, after applying it to my current code and running a wrk stress test (./wrk -t12 -c400 -d30s http://127.0.0.1:9090), the test appears to run fine, but as soon as it ends (probably when the last connections drop) my server prints a series of lines saying module 'asyncio.streams' has no attribute 'IncompleteReadError'. That does not happen in the single-threaded version. Any ideas? If not, I'll still accept the answer; I'm just trying to understand it. I suspect it may be related to my use of aiomysql in this setup.

Found the problem. In case anyone else runs into it: aiomysql was not yet ready for Python 3.8, and you only see it at scale (many connections). I had to install a fork manually with pip install git+https://github.com/dongweiming/aiomysql.git@python38#egg=aiomysql until it gets merged into the main repo.