
Python multiprocessing logging via QueueHandler


I have a Python multiprocessing application to which I would like to add some logging functionality. The Python logging cookbook recommends using a Queue: each process puts its log records onto the queue via a QueueHandler, and a listener process handles the records with a predefined Handler.

Here is the example provided by the Python logging cookbook:

# You'll need these imports in your own code
import logging
import logging.handlers
import multiprocessing

# Next two import lines for this demo only
from random import choice, random
import time

#
# Because you'll want to define the logging configurations for listener and workers, the
# listener and worker process functions take a configurer parameter which is a callable
# for configuring logging for that process. These functions are also passed the queue,
# which they use for communication.
#
# In practice, you can configure the listener however you want, but note that in this
# simple example, the listener does not apply level or filter logic to received records.
# In practice, you would probably want to do this logic in the worker processes, to avoid
# sending events which would be filtered out between processes.
#
# The size of the rotated files is made small so you can see the results easily.
def listener_configurer():
    root = logging.getLogger()
    h = logging.handlers.RotatingFileHandler('mptest.log', 'a', 300, 10)
    f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
    h.setFormatter(f)
    root.addHandler(h)

# This is the listener process top-level loop: wait for logging events
# (LogRecords)on the queue and handle them, quit when you get a None for a
# LogRecord.
def listener_process(queue, configurer):
    configurer()
    while True:
        try:
            record = queue.get()
            if record is None:  # We send this as a sentinel to tell the listener to quit.
                break
            logger = logging.getLogger(record.name)
            logger.handle(record)  # No level or filter logic applied - just do it!
        except Exception:
            import sys, traceback
            print('Whoops! Problem:', file=sys.stderr)
            traceback.print_exc(file=sys.stderr)

# Arrays used for random selections in this demo

LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING,
          logging.ERROR, logging.CRITICAL]

LOGGERS = ['a.b.c', 'd.e.f']

MESSAGES = [
    'Random message #1',
    'Random message #2',
    'Random message #3',
]

# The worker configuration is done at the start of the worker process run.
# Note that on Windows you can't rely on fork semantics, so each process
# will run the logging configuration code when it starts.
def worker_configurer(queue):
    h = logging.handlers.QueueHandler(queue)  # Just the one handler needed
    root = logging.getLogger()
    root.addHandler(h)
    # send all messages, for demo; no other level or filter logic applied.
    root.setLevel(logging.DEBUG)

# This is the worker process top-level loop, which just logs ten events with
# random intervening delays before terminating.
# The print messages are just so you know it's doing something!
def worker_process(queue, configurer):
    configurer(queue)
    name = multiprocessing.current_process().name
    print('Worker started: %s' % name)
    for i in range(10):
        time.sleep(random())
        logger = logging.getLogger(choice(LOGGERS))
        level = choice(LEVELS)
        message = choice(MESSAGES)
        logger.log(level, message)
    print('Worker finished: %s' % name)

# Here's where the demo gets orchestrated. Create the queue, create and start
# the listener, create ten workers and start them, wait for them to finish,
# then send a None to the queue to tell the listener to finish.
def main():
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(target=listener_process,
                                       args=(queue, listener_configurer))
    listener.start()
    workers = []
    for i in range(10):
        worker = multiprocessing.Process(target=worker_process,
                                         args=(queue, worker_configurer))
        workers.append(worker)
        worker.start()
    for w in workers:
        w.join()
    queue.put_nowait(None)
    listener.join()

if __name__ == '__main__':
    main()

My problem: the example from the Python logging cookbook implies that the Queue has to be passed to every function that will run in multiprocessing mode. This certainly works if you have a small application, but it gets ugly with a larger program. Is there a way to use something like a singleton Queue that is created once, e.g. via logging.config.dictConfig, and is then shared by all processes, without having to pass it to every function?
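To illustrate what I mean, here is a minimal sketch (the module-level log_queue, the config dict and the function name are hypothetical): the queue lives in one module, and each process installs a QueueHandler for it via logging.config.dictConfig instead of receiving the queue as a function argument. As far as I can tell, a module-level queue like this is only inherited by child processes under a fork start method; under spawn (e.g. Windows) each child re-imports the module and gets a fresh, unconnected queue.

import logging
import logging.config
import logging.handlers
import multiprocessing

# Hypothetical "singleton" module: the queue is created once here and imported
# wherever logging needs to be configured, instead of being passed around.
log_queue = multiprocessing.Queue(-1)

WORKER_LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'queue': {
            # '()' makes dictConfig call this factory with the remaining keys
            # as keyword arguments, so the shared queue can be injected.
            '()': 'logging.handlers.QueueHandler',
            'queue': log_queue,
        },
    },
    'root': {'level': 'DEBUG', 'handlers': ['queue']},
}

def configure_worker_logging():
    # Called once at the start of each worker process.
    logging.config.dictConfig(WORKER_LOGGING_CONFIG)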

In your case, a couple of simple classes will do the trick.

Have a look, and let me know if you need any further explanation or want something different:

import logging
import logging.handlers
import multiprocessing
import multiprocessing.pool
from random import choice, random
import time


class ProcessLogger(multiprocessing.Process):
    _global_process_logger = None

    def __init__(self):
        super().__init__()
        self.queue = multiprocessing.Queue(-1)

    @classmethod
    def get_global_logger(cls):
        if cls._global_process_logger is not None:
            return cls._global_process_logger
        raise Exception("No global process logger exists.")

    @classmethod
    def create_global_logger(cls):
        cls._global_process_logger = ProcessLogger()
        return cls._global_process_logger

    @staticmethod
    def configure():
        root = logging.getLogger()
        h = logging.handlers.RotatingFileHandler('mptest.log', 'a', 300, 10)
        f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
        h.setFormatter(f)
        root.addHandler(h)

    def stop(self):
        self.queue.put_nowait(None)

    def run(self):
        self.configure()
        while True:
            try:
                record = self.queue.get()
                if record is None:  # sentinel: stop the listener
                    break
                logger = logging.getLogger(record.name)
                logger.handle(record)
            except Exception:
                import sys, traceback
                print('Whoops! Problem:', file=sys.stderr)
                traceback.print_exc(file=sys.stderr)

    def new_process(self, target, args=[], kwargs={}):
        return ProcessWithLogging(target, args, kwargs, log_process=self)


def configure_new_process(log_process_queue):
    h = logging.handlers.QueueHandler(log_process_queue)
    root = logging.getLogger()
    root.addHandler(h)
    root.setLevel(logging.DEBUG)


class ProcessWithLogging(multiprocessing.Process):
    def __init__(self, target, args=[], kwargs={}, log_process=None):
        super().__init__()
        self.target = target
        self.args = args
        self.kwargs = kwargs
        if log_process is None:
            log_process = ProcessLogger.get_global_logger()
        self.log_process_queue = log_process.queue

    def run(self):
        configure_new_process(self.log_process_queue)
        self.target(*self.args, **self.kwargs)


class PoolWithLogging(multiprocessing.pool.Pool):
    def __init__(self, processes=None, context=None, log_process=None):
        if log_process is None:
            log_process = ProcessLogger.get_global_logger()
        super().__init__(processes=processes, initializer=configure_new_process,
                         initargs=(log_process.queue,), context=context)


LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING,
          logging.ERROR, logging.CRITICAL]

LOGGERS = ['a.b.c', 'd.e.f']

MESSAGES = [
    'Random message #1',
    'Random message #2',
    'Random message #3',
]


def worker_process(param=None):
    name = multiprocessing.current_process().name
    print('Worker started: %s' % name)
    for i in range(10):
        time.sleep(random())
        logger = logging.getLogger(choice(LOGGERS))
        level = choice(LEVELS)
        message = choice(MESSAGES)
        logger.log(level, message)
    print('Worker finished: {}, param: {}'.format(name, param))
    return param


def main():
    process_logger = ProcessLogger.create_global_logger()
    process_logger.start()

    workers = []
    for i in range(10):
        worker = ProcessWithLogging(worker_process)
        workers.append(worker)
        worker.start()
    for w in workers:
        w.join()

    with PoolWithLogging(processes=4) as pool:
        print(pool.map(worker_process, range(8)))

    process_logger.stop()
    process_logger.join()


if __name__ == '__main__':
    main()
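The point of this setup is that, once ProcessLogger.create_global_logger() has been called in the main process, code running inside a ProcessWithLogging or PoolWithLogging worker never sees the queue at all: configure_new_process() installs the QueueHandler on the root logger when the worker starts, so ordinary module-level loggers just work. A small (hypothetical) example of such worker-side code:

# Hypothetical library module used inside a worker process: it only relies on
# the standard logging API, because the QueueHandler is already installed on
# the root logger by configure_new_process() when the worker starts.
import logging

logger = logging.getLogger(__name__)

def do_some_work(item):
    logger.info('processing %s', item)
    return item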

The multiprocessing module is very flexible. How do you call your multiprocessing functions? Do you use a Pool, or your own Processes? Could you add a minimal code example?

I have added the example from the Python logging cookbook.

Thank you very much! This looks great! Do you have any suggestion for the scenario where the worker processes are started via a Pool?

See my edited answer. Let me know if you need anything else.

This is great, thank you very much! Is there a way to make it work under Windows as well? Right now there is a pickling problem on Windows:
TypeError: cannot pickle weakref objects

Thanks for letting me know, Annamaria. I have edited my answer, it should work on Windows now. Let me know if it works for you.

Thanks!! It's perfect!