Python,需要日志记录和日志旋转和压缩吗
任何人都可以在python中提出一种方法来进行日志记录:Python,需要日志记录和日志旋转和压缩吗,python,logging,Python,Logging,任何人都可以在python中提出一种方法来进行日志记录: 日志每天轮换 原木旋转时的压缩 可选-删除最旧的日志文件以保留X MB的可用空间 可选-将sftp日志文件发送到服务器 谢谢你的回复, 弗雷德 日志每天轮换:使用 日志压缩:设置encoding='bz2'参数。(请注意,此“技巧”仅适用于Python2。在Python3中,“bz2”不再被视为编码。) 可选-删除最旧的日志文件以保留X MB的可用空间。 您可以(间接地)使用。通过设置maxBytes参数,日志文件将在达到一定大小时滚
- 日志每天轮换
- 日志轮换时进行压缩
- 可选-删除最旧的日志文件以保留X MB的可用空间
- 可选-将sftp日志文件发送到服务器
- 日志每天轮换:使用
- 日志压缩:设置
`encoding='bz2'` 参数。(请注意,此“技巧”仅适用于 Python 2。在 Python 3 中,“bz2”不再被视为编码。)
- 可选-删除最旧的日志文件以保留X MB的可用空间。
您可以(间接地)使用。通过设置
参数,日志文件将在达到一定大小时滚动。通过设置maxBytes
参数,可以控制保留的滚动次数。这两个参数一起允许您控制日志文件消耗的最大空间。您可能还可以将backupCount
可以将 `TimedRotatingFileHandler` 子类化,以将此行为也纳入其中
为了好玩,下面是如何子类化
TimedRotatingFileHandler
。当您运行下面的脚本时,它会将日志文件写入 /tmp/log_rotate*
对于time.sleep
(如0.1)的值很小,日志文件会很快填满,达到最大字节数限制,然后被滚动
如果使用较大的time.sleep
(如1.0),日志文件填充速度较慢,未达到maxBytes限制,但当达到时间间隔(10秒)时,它们仍会滚动
下面的所有代码都来自上述来源。我只是以最直接的方式将 TimedRotatingFileHandler 与 RotatingFileHandler 结合在一起
import time
import re
import os
import stat
import logging
import logging.handlers as handlers
class SizedTimedRotatingFileHandler(handlers.TimedRotatingFileHandler):
    """
    Log handler that rolls over on EITHER of two triggers: when the
    current file would exceed ``maxBytes`` (size-based, like
    RotatingFileHandler), or when the timed-rotation deadline inherited
    from TimedRotatingFileHandler is reached.
    """

    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None,
                 delay=0, when='h', interval=1, utc=False):
        # Delegate all time-based configuration to the stdlib handler;
        # keep only the size threshold here (0 disables the size trigger).
        handlers.TimedRotatingFileHandler.__init__(
            self, filename, when, interval, backupCount, encoding, delay, utc)
        self.maxBytes = maxBytes

    def shouldRollover(self, record):
        """
        Return 1 if a rollover should occur before emitting *record*.

        First checks whether the formatted record would push the file past
        the size limit, then falls back to the parent's time deadline.
        """
        if self.stream is None:  # stream not opened yet because delay was set
            self.stream = self._open()
        if self.maxBytes > 0:  # size-based trigger enabled?
            formatted = "%s\n" % self.format(record)
            # Explicitly seek to EOF: on non-POSIX Windows the file position
            # is not guaranteed to be at the end.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(formatted) >= self.maxBytes:
                return 1
        # Time-based trigger: compare wall clock against the scheduled rollover.
        return 1 if int(time.time()) >= self.rolloverAt else 0
def demo_SizedTimedRotatingFileHandler():
    """Demo of SizedTimedRotatingFileHandler.

    Writes DEBUG records to /tmp/log_rotate*; files roll over when they
    reach maxBytes=100 or when the 10-second interval elapses.  With the
    0.1 s sleep the full run takes ~1000 seconds.
    """
    log_filename = '/tmp/log_rotate'
    logger = logging.getLogger('MyLogger')
    logger.setLevel(logging.DEBUG)
    handler = SizedTimedRotatingFileHandler(
        log_filename, maxBytes=100, backupCount=5,
        when='s', interval=10,
        # encoding='bz2',  # uncomment for bz2 compression (Python 2 only)
    )
    logger.addHandler(handler)
    for i in range(10000):
        time.sleep(0.1)
        logger.debug('i=%d' % i)


# NOTE(review): executes at import time; consider guarding with
# `if __name__ == '__main__':` if this module is ever imported.
demo_SizedTimedRotatingFileHandler()
除了unutbu的答案之外:下面是如何修改TimedRotatingFileHandler以使用zip文件进行压缩
import logging
import logging.handlers
import zipfile
import codecs
import sys
import os
import time
import glob
class TimedCompressedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    """
    Extended version of TimedRotatingFileHandler that compresses logs on
    rollover: the rotated file is zipped and the uncompressed copy removed.
    """

    def doRollover(self):
        """
        Do a rollover: rename the current log to a date/time-stamped name
        (stamped for the START of the interval, not the current time),
        prune the oldest backup beyond backupCount, reopen the stream,
        then zip the rotated file and delete the uncompressed original.
        """
        if self.stream:
            # Guard: with delay=True the stream may still be None here.
            self.stream.close()
        # Get the time this sequence started at and make it a TimeTuple.
        t = self.rolloverAt - self.interval
        timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # Find the oldest rotated file and delete it; the date suffix
            # sorts lexicographically in chronological order.
            backups = glob.glob(self.baseFilename + ".20*")
            if len(backups) > self.backupCount:
                backups.sort()
                os.remove(backups[0])
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
        else:
            self.stream = open(self.baseFilename, 'w')
        self.rolloverAt = self.rolloverAt + self.interval
        # Compress the freshly rotated file, then drop the uncompressed copy.
        if os.path.exists(dfn + ".zip"):
            os.remove(dfn + ".zip")
        # `with` guarantees the archive is flushed and closed even on error
        # (the original leaked the handle and shadowed the builtin `file`).
        with zipfile.ZipFile(dfn + ".zip", "w") as zf:
            zf.write(dfn, os.path.basename(dfn), zipfile.ZIP_DEFLATED)
        os.remove(dfn)
if __name__=='__main__':
    ## Demo of using TimedCompressedRotatingFileHandler() to log every 5 seconds,
    ## to one uncompressed file and five rotated and compressed files
    os.nice(19) # I always nice test code
    logHandler = TimedCompressedRotatingFileHandler("mylog", when="S",
        interval=5, backupCount=5) # Total of six rotated log files, rotating every 5 secs
    logFormatter = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    logHandler.setFormatter(logFormatter)
    mylogger = logging.getLogger('MyLogRef')
    mylogger.addHandler(logHandler)
    mylogger.setLevel(logging.DEBUG)
    # Write lines non-stop into the logger and rotate every 5 seconds
    # NOTE(review): infinite loop by design -- the demo runs until interrupted.
    ii = 0
    while True:
        mylogger.debug("Test {0}".format(ii))
        ii += 1
在旋转期间压缩日志文件的另一种方法(python 3.3中新增)是使用BaseRotatingHandler(以及所有继承的)类属性rotator,例如:
import gzip
import os
import logging
import logging.handlers
class GZipRotator:
    """Rotator callable for ``BaseRotatingHandler.rotator`` (Python 3.3+).

    Renames *source* to *dest*, writes a gzip-compressed copy as
    ``dest + '.gz'``, then removes the uncompressed file.
    """

    def __call__(self, source, dest):
        os.rename(source, dest)
        # Context managers ensure both handles are closed even if the
        # compression step raises (the original leaked them on error).
        with open(dest, 'rb') as f_in, gzip.open("%s.gz" % dest, 'wb') as f_out:
            f_out.writelines(f_in)
        os.remove(dest)
# Module-level wiring: a midnight-rotating handler whose rotated files are
# gzip-compressed via the `rotator` hook (available since Python 3.3).
logformatter = logging.Formatter('%(asctime)s;%(levelname)s;%(message)s')
# Rotate at midnight, keep 5 backups.
log = logging.handlers.TimedRotatingFileHandler('debug.log', 'midnight', 1, backupCount=5)
log.setLevel(logging.DEBUG)
log.setFormatter(logformatter)
log.rotator = GZipRotator()  # plug the compressor into the rollover step
logger = logging.getLogger('main')
logger.addHandler(log)
logger.setLevel(logging.DEBUG)
....
您可以看到更多信息。要复制文件,请gzip复制的文件(使用历元时间),然后以不会打乱日志模块的方式清除现有文件:
import gzip
import logging
import os
from shutil import copy2
from time import time
def logRoll(logfile_name):
    """Snapshot, compress and truncate a live log file.

    Copies the current log to ``<name>.<epoch>``, gzips that copy, removes
    the uncompressed snapshot, then truncates the original in place so the
    logging module keeps writing to the same file.  Silently returns if
    the log file does not exist yet.
    """
    log_backup_name = logfile_name + '.' + str(int(time()))
    try:
        copy2(logfile_name, log_backup_name)
    except OSError:
        # Python 3 syntax (the original `except IOError, err` is a
        # SyntaxError on py3); IOError is an alias of OSError.
        logging.debug(' No logfile to roll')
        return
    # gzip the snapshot, then drop the uncompressed copy; context managers
    # close both handles even if compression fails.
    with open(log_backup_name, 'rb') as f_in, \
            gzip.open(log_backup_name + '.gz', 'wb') as f_out:
        f_out.writelines(f_in)
    os.remove(log_backup_name)
    # Truncate the live log without replacing its file object.
    with open(logfile_name, 'w'):
        pass
我想现在参加聚会已经太晚了,但我做的就是这样。我创建了一个新类,继承了
logging.handlers.RotatingFileHandler
类,并在移动文件之前向gzip文件添加了几行代码
我认为最好的选择是使用当前的
TimedRotatingFileHandler
实现,将日志文件重命名为旋转版本后,只需压缩它:
import zipfile
import os
from logging.handlers import TimedRotatingFileHandler
class TimedCompressedRotatingFileHandler(TimedRotatingFileHandler):
    """
    Extended version of TimedRotatingFileHandler that compresses logs on
    rollover: after the base class renames the log to e.g.
    ``filename.2017-12-12``, the oldest uncompressed rotated file is
    zipped and then removed.
    """

    def find_last_rotated_file(self):
        """Return the full path of the oldest uncompressed rotated file.

        Relies on the date-stamped suffix (``.20xx-...``) sorting
        lexicographically in chronological order.
        """
        dir_name, base_name = os.path.split(self.baseFilename)
        prefix = '{}.20'.format(base_name)  # rotated files look like filename.2017-12-12...
        candidates = [
            file_name for file_name in os.listdir(dir_name)
            if file_name.startswith(prefix) and not file_name.endswith('.zip')
        ]
        candidates.sort()
        # Join with the log directory: the original returned a bare
        # basename, which only worked when cwd happened to be the log dir.
        return os.path.join(dir_name, candidates[0])

    def doRollover(self):
        """Perform the normal timed rollover, then zip the rotated file."""
        super(TimedCompressedRotatingFileHandler, self).doRollover()
        dfn = self.find_last_rotated_file()
        dfn_zipped = '{}.zip'.format(dfn)
        if os.path.exists(dfn_zipped):
            os.remove(dfn_zipped)
        with zipfile.ZipFile(dfn_zipped, 'w') as f:
            # Store the member under its unzipped basename; the original
            # stored it under the ".zip" name, which is misleading on extract.
            f.write(dfn, os.path.basename(dfn), zipfile.ZIP_DEFLATED)
        os.remove(dfn)
请注意:python 3中的类签名已更改。下面是我的python 3.6工作示例
import logging.handlers
import os
import zlib
def namer(name):
    """Namer hook: rotated files get a ".gz" extension appended."""
    return f"{name}.gz"
def rotator(source, dest):
    """Rotator hook: compress *source* into *dest*, then delete *source*.

    NOTE(review): ``zlib.compress`` emits a raw zlib stream, not a gzip
    container, even though the companion namer appends ".gz" -- verify
    that downstream tooling expects this format.
    """
    print(f'compressing {source} -> {dest}')
    with open(source, "rb") as sf:
        payload = sf.read()
    with open(dest, "wb") as df:
        df.write(zlib.compress(payload, 9))
    os.remove(source)
# Example wiring (Python 3.6+): minute-based rotation with compression via
# the rotator/namer hooks on the handler.
# NOTE(review): writes to /data/errors.log -- the directory must exist.
err_handler = logging.handlers.TimedRotatingFileHandler('/data/errors.log', when="M", interval=1,
                                                        encoding='utf-8', backupCount=30, utc=True)
err_handler.rotator = rotator  # compress each rotated file
err_handler.namer = namer      # append ".gz" to rotated filenames
logger = logging.getLogger("Rotating Log")
logger.setLevel(logging.ERROR)
logger.addHandler(err_handler)
这是我的解决方案(从中修改),简单,在gzip处理大型日志文件时不会阻止python代码:
class GZipRotator:
    """Rotator hook that gzips rotated files in a background process.

    Spawning ``gzip`` as a detached child means large log files do not
    block the logging thread while they compress; gzip replaces *dest*
    with ``dest.gz`` asynchronously.
    """

    def __call__(self, source, dest):
        # Local import: this snippet's surrounding code never imported
        # subprocess, so the original raised NameError at rollover time.
        import subprocess
        os.rename(source, dest)
        # Fire-and-forget: no wait(), compression completes in the background.
        subprocess.Popen(['gzip', dest])
我在下面添加了一个解决方案,我基本上是压缩旧的
将日志备份到zip,并在其上添加时间戳。使用一个额外变量
叫ZipbackupCount要压缩的旧文件数
例如,我们有这样的日志。(backupCount=5, ZipbackupCount=2)
- a.log.1
- a.log.2
- a.log.3
- a.log.4
a、 log.11-09-2020-11-11-11.zip
一旦备份日志数量达到 5,就会触发把 a.log.5 和 a.log.4 压缩进上面的 zip 归档,然后继续轮换
轮换?你是说每天都要运行脚本吗?如果是这样的话,我建议使用cron作业。我的应用程序将持续运行和记录,因此我希望系统每天启动一个新的日志文件。如果IMHO中的示例比较干净,将重命名与压缩分离(但指针为+1),这是可行的,但可以获得+1,如果进程正在运行,您需要使用
或最终构建测试此工作。如果将“when”设置为午夜,并且此脚本在may前后未运行,则日志文件将不会旋转,日志记录将保留在当前日志中。
class GZipRotator:
    """Rotator hook that gzips the rotated file in a background process.

    NOTE(review): duplicate of the GZipRotator defined earlier in this
    file, and `subprocess` is not imported in this snippet -- add
    ``import subprocess`` before using it.
    """
    def __call__(self, source, dest):
        os.rename(source, dest)
        # Fire-and-forget: gzip replaces `dest` with `dest.gz` asynchronously.
        subprocess.Popen(['gzip', dest])
import os
import datetime
import gzip
import logging.handlers
import zipfile
from config.config import PROJECT_PATH, LOG_DIR, LOG_FILE_NAME, LOG_FILESIZE
class NewRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """Size-based rotating handler that, once all backupCount slots are
    filled, folds the oldest backups into a timestamped zip archive.

    Extra keyword ZipbackupCount: how many of the oldest ``.N`` backups
    to compress into the zip on each rollover.
    """

    def __init__(self, filename, **kws):
        backupCount = kws.get('backupCount', 0)
        self.backup_count = backupCount
        # pop(): the base class does not accept ZipbackupCount.
        self.ZipbackupCount = kws.pop('ZipbackupCount', 0)
        self.file_name = filename
        self.log_dir = os.path.split(self.file_name)[0]
        self.log_file_name = os.path.split(self.file_name)[-1]
        logging.handlers.RotatingFileHandler.__init__(self, filename, **kws)

    def doArchive(self, old_log):
        """gzip a single log file in place and remove the original."""
        # Binary mode: gzip members are bytes; the original's text-mode
        # open fed str into a binary stream -> TypeError on Python 3.
        with open(old_log, 'rb') as log:
            with gzip.open(old_log + '.gz', 'wb') as comp_log:
                comp_log.writelines(log)
        os.remove(old_log)

    def doRollover(self):
        """Normal size-based rollover, then zip the oldest backups."""
        super(NewRotatingFileHandler, self).doRollover()
        zip_file_name = self.log_file_name + "." + datetime.datetime.now().strftime("%d-%m-%Y-%H-%M-%S") + ".zip"
        oldest = self.rotation_filename("%s.%d" % (self.baseFilename, self.backupCount))
        # Only archive once the last backup slot is actually occupied.
        if os.path.exists(oldest) and self.ZipbackupCount > 0 and self.file_name:
            with zipfile.ZipFile(os.path.join(self.log_dir, zip_file_name), "w", zipfile.ZIP_DEFLATED, allowZip64=True) as zf:
                for i in range(self.backupCount, self.backupCount - self.ZipbackupCount, -1):
                    sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                    if os.path.exists(sfn):
                        zf.write(sfn, "%s.%d" % (self.log_file_name, i))
                        os.remove(sfn)
            # `with` closes the archive; the original's zf.close() inside
            # the with-block and the `else: continue` were redundant.
# handler = NewRotatingFileHandler(filename=os.path.join(PROJECT_PATH, LOG_DIR, LOG_FILE_NAME),
# maxBytes=LOG_FILESIZE, backupCount=5, ZipbackupCount=2)
#
# handler.doRollover()