# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.

Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, pickle, struct, time, re
from codecs import BOM_UTF8
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
    import threading
except ImportError: #pragma: no cover
    threading = None
#
# Some constants...
#

DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# Standard syslog port (RFC 5424 / RFC 6587); referenced by SysLogHandler below.
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional callables used to customise rotation; see rotation_filename()
        # and rotate() below.
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be provided.

        The default implementation calls the 'namer' attribute of the
        handler, if it's callable, passing the default name to
        it. If the attribute isn't callable (the default is None), the name
        is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if not callable(self.namer):
            result = default_name
        else:
            result = self.namer(default_name)
        return result

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The default implementation calls the 'rotator' attribute of the
        handler, if it's callable, passing the source and dest arguments to
        it. If the attribute isn't callable (the default is None), the source
        is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if not callable(self.rotator):
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
        else:
            self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift existing backups up by one, dropping the oldest.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Record the device and inode of the currently open stream so a later
        # emit() can detect an external rename/rotation of the file.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        if self.port is None:
            family = socket.AF_UNIX
        else:
            family = socket.AF_INET
        s = socket.socket(family, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            self.unixsocket = True
            self._connect_unixsocket(address)
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
            self.socktype = socktype
        self.formatter = None

    def _connect_unixsocket(self, address):
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        return (facility << 3) | priority

    def close (self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
911
class SMTPHandler(logging.Handler):
913
A handler class which sends an SMTP email for each logging event.
915
def __init__(self, mailhost, fromaddr, toaddrs, subject,
916
credentials=None, secure=None, timeout=5.0):
918
Initialize the handler.
920
Initialize the instance with the from and to addresses and subject
921
line of the email. To specify a non-standard SMTP port, use the
922
(host, port) tuple format for the mailhost argument. To specify
923
authentication credentials, supply a (username, password) tuple
924
for the credentials argument. To specify the use of a secure
925
protocol (TLS), pass in a tuple for the secure argument. This will
926
only be used when authentication credentials are supplied. The tuple
927
will be either an empty tuple, or a single-value tuple with the name
928
of a keyfile, or a 2-value tuple with the names of the keyfile and
929
certificate file. (This tuple is passed to the `starttls` method).
930
A timeout in seconds can be specified for the SMTP connection (the
931
default is one second).
933
logging.Handler.__init__(self)
934
if isinstance(mailhost, tuple):
935
self.mailhost, self.mailport = mailhost
937
self.mailhost, self.mailport = mailhost, None
938
if isinstance(credentials, tuple):
939
self.username, self.password = credentials
942
self.fromaddr = fromaddr
943
if isinstance(toaddrs, str):
945
self.toaddrs = toaddrs
946
self.subject = subject
948
self.timeout = timeout
950
def getSubject(self, record):
952
Determine the subject for the email.
954
If you want to specify a subject line which is record-dependent,
955
override this method.
959
def emit(self, record):
963
Format the record and send it to the specified addressees.
967
from email.utils import formatdate
970
port = smtplib.SMTP_PORT
971
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
972
msg = self.format(record)
973
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
975
",".join(self.toaddrs),
976
self.getSubject(record),
979
if self.secure is not None:
981
smtp.starttls(*self.secure)
983
smtp.login(self.username, self.password)
984
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
987
self.handleError(record)
989
class NTEventLogHandler(logging.Handler):
991
A handler class which sends events to the NT Event Log. Adds a
992
registry entry for the specified application name. If no dllname is
993
provided, win32service.pyd (which contains some basic message
994
placeholders) is used. Note that use of these placeholders will make
995
your event logs big, as the entire message source is held in the log.
996
If you want slimmer logs, you have to pass in the name of your own DLL
997
which contains the message definitions you want to use in the event log.
999
def __init__(self, appname, dllname=None, logtype="Application"):
1000
logging.Handler.__init__(self)
1002
import win32evtlogutil, win32evtlog
1003
self.appname = appname
1004
self._welu = win32evtlogutil
1006
dllname = os.path.split(self._welu.__file__)
1007
dllname = os.path.split(dllname[0])
1008
dllname = os.path.join(dllname[0], r'win32service.pyd')
1009
self.dllname = dllname
1010
self.logtype = logtype
1011
self._welu.AddSourceToRegistry(appname, dllname, logtype)
1012
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
1014
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
1015
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
1016
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
1017
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
1018
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
1021
print("The Python Win32 extensions for NT (service, event "\
1022
"logging) appear not to be available.")
1025
def getMessageID(self, record):
1027
Return the message ID for the event record. If you are using your
1028
own messages, you could do this by having the msg passed to the
1029
logger being an ID rather than a formatting string. Then, in here,
1030
you could use a dictionary lookup to get the message ID. This
1031
version returns 1, which is the base message ID in win32service.pyd.
1035
def getEventCategory(self, record):
1037
Return the event category for the record.
1039
Override this if you want to specify your own categories. This version
1044
def getEventType(self, record):
1046
Return the event type for the record.
1048
Override this if you want to specify your own types. This version does
1049
a mapping using the handler's typemap attribute, which is set up in
1050
__init__() to a dictionary which contains mappings for DEBUG, INFO,
1051
WARNING, ERROR and CRITICAL. If you are using your own levels you will
1052
either need to override this method or place a suitable dictionary in
1053
the handler's typemap attribute.
1055
return self.typemap.get(record.levelno, self.deftype)
1057
def emit(self, record):
    """
    Emit a record.

    Determine the message ID, event category and event type. Then
    log the message in the NT event log.
    """
    # _welu is None when the Win32 extensions were unavailable at
    # construction time; in that case this handler is a no-op.
    if self._welu:
        try:
            id = self.getMessageID(record)
            cat = self.getEventCategory(record)
            type = self.getEventType(record)
            msg = self.format(record)
            self._welu.ReportEvent(self.appname, id, cat, type, [msg])
        except Exception:
            self.handleError(record)
def close(self):
    """
    Clean up this handler.

    You can remove the application name from the registry as a
    source of event log entries. However, if you do this, you will
    not be able to see the events as you intended in the Event Log
    Viewer - it needs to be able to access the registry to get the
    DLL name.
    """
    #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
    logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST").

        If secure is true, HTTPS is used instead of HTTP. credentials, if
        given, is a (username, password) tuple used for HTTP Basic auth.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary.
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                h = http.client.HTTPSConnection(host)
            else:
                h = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the data to the query string, respecting any
                # query string already present in the URL.
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                # BUGFIX: the original built 'u%s:%s', leaking a literal
                # 'u' into the userid, and concatenated str + bytes
                # (TypeError). Encode, b64-encode, then decode to str.
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                h.putheader('Authorization', s)
            h.endheaders()
            if self.method == "POST":
                h.send(data.encode('utf-8'))
            h.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
                (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.

        The record buffer is also cleared by this operation.
        """
        self.acquire()
        try:
            if self.target:
                for record in self.buffer:
                    self.target.handle(record)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        try:
            self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used together
    with a multiprocessing Queue to centralise logging to file in one process
    (in a multi-process application), so as to avoid file write contention
    between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use blocking, timeouts or custom queue
        implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepares a record for queuing. The object returned by this method is
        enqueued.

        The base implementation formats the record to merge the message
        and arguments, and removes unpickleable items from the record
        in-place.

        You might want to override this method if you want to convert
        the record to a dict or JSON string, or send a modified copy
        of the record while leaving the original intact.
        """
        # The format operation gets traceback text into record.exc_text
        # (if there's exception data), and also puts the message into
        # record.message. We can then use this to replace the original
        # msg + args, as these might be unpickleable. We also zap the
        # exc_info attribute, as it's no longer needed and, if not None,
        # will typically not be pickleable.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
class QueueListener(object):
1342
This class implements an internal threaded listener which watches for
1343
LogRecords being added to a queue, removes them and passes them to a
1344
list of handlers for processing.
1348
def __init__(self, queue, *handlers):
    """
    Initialise an instance with the specified queue and
    handlers.
    """
    self.queue = queue
    self.handlers = handlers
    self._stop = threading.Event()
    # The monitor thread is created lazily by start().
    self._thread = None
def dequeue(self, block):
    """
    Dequeue a record and return it, optionally blocking.

    The base implementation uses get. You may want to override this method
    if you want to use timeouts or work with custom queue implementations.
    """
    return self.queue.get(block)
def start(self):
    """
    Start the listener.

    This starts up a background thread to monitor the queue for
    LogRecords to process.
    """
    self._thread = t = threading.Thread(target=self._monitor)
    # Daemonize so the monitor thread never prevents interpreter exit.
    t.daemon = True
    t.start()
def prepare(self, record):
    """
    Prepare a record for handling.

    This method just returns the passed-in record. You may want to
    override this method if you need to do any custom marshalling or
    manipulation of the record before passing it to the handlers.
    """
    return record
def handle(self, record):
    """
    Handle a record.

    This just loops through the handlers offering them the record
    to handle.
    """
    record = self.prepare(record)
    for handler in self.handlers:
        handler.handle(record)
def _monitor(self):
    """
    Monitor the queue for records, and ask the handler
    to deal with them.

    This method runs on a separate, internal thread.
    The thread will terminate if it sees a sentinel object in the queue.
    """
    q = self.queue
    # Only call task_done() on queues that support it (queue.Queue does,
    # multiprocessing queues may not).
    has_task_done = hasattr(q, 'task_done')
    while not self._stop.isSet():
        try:
            record = self.dequeue(True)
            if record is self._sentinel:
                break
            self.handle(record)
            if has_task_done:
                q.task_done()
        except queue.Empty:
            pass
    # There might still be records in the queue.
    while True:
        try:
            record = self.dequeue(False)
            if record is self._sentinel:
                break
            self.handle(record)
            if has_task_done:
                q.task_done()
        except queue.Empty:
            break
def enqueue_sentinel(self):
    """
    This is used to enqueue the sentinel record.

    The base implementation uses put_nowait. You may want to override this
    method if you want to use timeouts or work with custom queue
    implementations.
    """
    self.queue.put_nowait(self._sentinel)
This asks the thread to terminate, and then waits for it to do so.
1446
Note that if you don't call this before your application exits, there
1447
may be some records still left on the queue, which won't be processed.
1450
self.enqueue_sentinel()