Merge lp:~alfredo-valles/python-concurrent-log-handler/python-concurrent-log-handler-timerot into lp:python-concurrent-log-handler

Proposed by Alfredo Valles
Status: Needs review
Proposed branch: lp:~alfredo-valles/python-concurrent-log-handler/python-concurrent-log-handler-timerot
Merge into: lp:python-concurrent-log-handler
Diff against target: 767 lines (+540/-29)
2 files modified
src/cloghandler.py (+498/-16)
stresstest.py (+42/-13)
To merge this branch: bzr merge lp:~alfredo-valles/python-concurrent-log-handler/python-concurrent-log-handler-timerot
Reviewer Review Type Date Requested Status
Lowell Alleman Pending
Review via email: mp+241705@code.launchpad.net

Description of the change

Add log handler for time based rotation

To post a comment you must log in.

Unmerged revisions

32. By Alfredo Valles

Add time rotation as a mode of operation for stresstest

31. By Alfredo Valles

Add log handler for time based rotation

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'src/cloghandler.py'
2--- src/cloghandler.py 2013-07-11 01:34:47 +0000
3+++ src/cloghandler.py 2014-11-13 16:11:31 +0000
4@@ -48,6 +48,7 @@
5 __author__ = "Lowell Alleman"
6 __all__ = [
7 "ConcurrentRotatingFileHandler",
8+ "ConcurrentTimeRotatingFileHandler"
9 ]
10
11
12@@ -62,6 +63,11 @@
13 except ImportError:
14 codecs = None
15
16+import re
17+import time
18+from stat import ST_MTIME
19+
20+_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
21
22
23 # Question/TODO: Should we have a fallback mode if we can't load portalocker /
24@@ -147,7 +153,7 @@
25 # that should be supported. For the moment, we are just going to warn
26 # the user if they provide a relative path and do some other voodoo
27 # logic that you'll just have to review for yourself.
28-
29+
30 # if the given filename contains no path, we make an absolute path
31 if not os.path.isabs(filename):
32 if FORCE_ABSOLUTE_PATH or \
33@@ -163,7 +169,7 @@
34 except TypeError: # Due to a different logging release without encoding support (Python 2.4.1 and earlier?)
35 BaseRotatingHandler.__init__(self, filename, mode)
36 self.encoding = encoding
37-
38+
39 self._rotateFailed = False
40 self.maxBytes = maxBytes
41 self.backupCount = backupCount
42@@ -173,17 +179,17 @@
43 else:
44 lock_file = filename
45 self.stream_lock = open(lock_file + ".lock", "w")
46-
47+
48 # For debug mode, swap out the "_degrade()" method with a more a verbose one.
49 if debug:
50 self._degrade = self._degrade_debug
51-
52+
53 def _openFile(self, mode):
54 if self.encoding:
55 self.stream = codecs.open(self.baseFilename, mode, self.encoding)
56 else:
57 self.stream = open(self.baseFilename, mode)
58-
59+
60 def acquire(self):
61 """ Acquire thread and file locks. Also re-opening log file when running
62 in 'degraded' mode. """
63@@ -192,7 +198,7 @@
64 lock(self.stream_lock, LOCK_EX)
65 if self.stream.closed:
66 self._openFile(self.mode)
67-
68+
69 def release(self):
70 """ Release file and thread locks. Flush stream and take care of closing
71 stream in 'degraded' mode. """
72@@ -211,7 +217,7 @@
73 finally:
74 # release thread lock
75 Handler.release(self)
76-
77+
78 def close(self):
79 """
80 Closes the stream.
81@@ -222,7 +228,7 @@
82 if not self.stream_lock.closed:
83 self.stream_lock.close()
84 Handler.close(self)
85-
86+
87 def flush(self):
88 """ flush(): Do nothing.
89
90@@ -235,12 +241,12 @@
91 another process to write to the log file in between calling
92 stream.write() and stream.flush(), which seems like a bad thing. """
93 pass
94-
95+
96 def _degrade(self, degrade, msg, *args):
97 """ Set degrade mode or not. Ignore msg. """
98 self._rotateFailed = degrade
99 del msg, args # avoid pychecker warnings
100-
101+
102 def _degrade_debug(self, degrade, msg, *args):
103 """ A more colorful version of _degade(). (This is enabled by passing
104 "debug=True" at initialization).
105@@ -255,7 +261,7 @@
106 sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
107 (os.getpid(), msg % args))
108 self._rotateFailed = False
109-
110+
111 def doRollover(self):
112 """
113 Do a rollover, as described in __init__().
114@@ -280,11 +286,11 @@
115 self._degrade(True, "rename failed. File in use? "
116 "exception=%s", exc_value)
117 return
118-
119+
120 # Q: Is there some way to protect this code from a KeboardInterupt?
121 # This isn't necessarily a data loss issue, but it certainly would
122 # break the rotation process during my stress testing.
123-
124+
125 # There is currently no mechanism in place to handle the situation
126 # where one of these log files cannot be renamed. (Example, user
127 # opens "logfile.3" in notepad)
128@@ -304,7 +310,7 @@
129 self._degrade(False, "Rotation completed")
130 finally:
131 self._openFile(self.mode)
132-
133+
134 def shouldRollover(self, record):
135 """
136 Determine if rollover should occur.
137@@ -322,7 +328,7 @@
138 self._openFile(self.mode)
139 return self._shouldRollover()
140 return False
141-
142+
143 def _shouldRollover(self):
144 if self.maxBytes > 0: # are we rolling over?
145 self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
146@@ -333,7 +339,483 @@
147 return False
148
149
150-# Publish this class to the "logging.handlers" module so that it can be use
151+class ConcurrentTimeRotatingFileHandler(BaseRotatingHandler):
152+
153+ """
154+ Handler for logging to a set of files, which switches from one file to the
155+ next at certain timed intervals. Multiple processes can write to the
156+ log file concurrently, but this may mean that rotation does not happen
157+ at exactly the scheduled time.
158+ """
159+
160+ def __init__(
161+ self,
162+ filename,
163+ when='h',
164+ interval=1,
165+ backupCount=0,
166+ encoding=None,
167+ utc=False,
168+ mode='a',
169+ debug=True,
170+ delay=0,
171+ ):
172+ """
173+ Open the specified file and use it as the stream for logging.
174+
175+ Rotation is time based: the 'when' and 'interval' arguments
176+ determine how often rollover occurs, and backupCount determines
177+ how many rotated files are kept.
178+
179+ Rollover occurs whenever the configured time interval has elapsed.
180+ The system will create new files with the same pathname as the
181+ base file, but with a date/time suffix appended to it, whose
182+ format depends on 'when' (for example "app.log.2014-11-13_16"
183+ for hourly rotation). The file being written to is always
184+ "app.log" - when the interval elapses, it is closed and renamed
185+ with the suffix of the period that just ended. If backupCount
186+ is >= 1, at most backupCount such rotated files are retained,
187+ and older ones are deleted at rollover time.
188+
189+ If backupCount is zero, rotated files are kept indefinitely;
190+ if it is negative, the log file is simply truncated at rollover.
191+
192+ On Windows, it is not possible to rename a file that is currently opened
193+ by another process. This means that it is not possible to rotate the
194+ log files if multiple processes is using the same log file. In this
195+ case, the current log file will continue to grow until the rotation can
196+ be completed successfully. In order for rotation to be possible, all of
197+ the other processes need to close the file first. A mechanism, called
198+ "degraded" mode, has been created for this scenario. In degraded mode,
199+ the log file is closed after each log message is written. So once all
200+ processes have entered degraded mode, the next rotation attempt should
201+ be successful and then normal logging can be resumed. Using the 'delay'
202+ parameter may help reduce contention in some usage patterns.
203+
204+ This log handler assumes that all concurrent processes logging to a
205+ single file are using only this class, and that the exact same
206+ parameters are provided to each instance of this class. If, for
207+ example, two different processes are using this class, but with
208+ different values for 'when', 'interval' or 'backupCount', then odd behavior is
209+ expected. The same is true if this class is used by one application, but
210+ the RotatingFileHandler is used by another.
211+ """
212+
213+ # filename, mode='a', maxBytes=0, backupCount=0, encoding=None, debug=True, delay=0):
214+ # Absolute file name handling done by FileHandler since Python 2.5
215+ BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
216+
217+ self.when = when.upper()
218+ self.backupCount = backupCount
219+ self.utc = utc
220+ # Calculate the real rollover interval, which is just the number of
221+ # seconds between rollovers. Also set the filename suffix used when
222+ # a rollover occurs. Current 'when' events supported:
223+ # S - Seconds
224+ # M - Minutes
225+ # H - Hours
226+ # D - Days
227+ # midnight - roll over at midnight
228+ # W{0-6} - roll over on a certain day; 0 - Monday
229+ #
230+ # Case of the 'when' specifier is not important; lower or upper case
231+ # will work.
232+ if self.when == 'S':
233+ self.interval = 1 # one second
234+ self.suffix = '%Y-%m-%d_%H-%M-%S'
235+ self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
236+ elif self.when == 'M':
237+ self.interval = 60 # one minute
238+ self.suffix = '%Y-%m-%d_%H-%M'
239+ self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
240+ elif self.when == 'H':
241+ self.interval = 60 * 60 # one hour
242+ self.suffix = '%Y-%m-%d_%H'
243+ self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
244+ elif self.when == 'D' or self.when == 'MIDNIGHT':
245+ self.interval = 60 * 60 * 24 # one day
246+ self.suffix = '%Y-%m-%d'
247+ self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
248+ elif self.when.startswith('W'):
249+ self.interval = 60 * 60 * 24 * 7 # one week
250+ if len(self.when) != 2:
251+ raise ValueError('You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s' % self.when)
252+ if self.when[1] < '0' or self.when[1] > '6':
253+ raise ValueError('Invalid day specified for weekly rollover: %s' % self.when)
254+ self.dayOfWeek = int(self.when[1])
255+ self.suffix = '%Y-%m-%d'
256+ self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
257+ else:
258+ raise ValueError('Invalid rollover interval specified: %s' % self.when)
259+
260+ self.extMatch = re.compile(self.extMatch)
261+ self.interval = self.interval * interval # multiply by units requested
262+ if os.path.exists(filename):
263+ t = os.stat(filename)[ST_MTIME]
264+ else:
265+ t = int(time.time())
266+ self.rolloverAt = self.computeRollover(t)
267+
268+ self.delay = delay
269+ self._rotateFailed = False
270+ self.backupCount = backupCount
271+
272+ self._open_lockfile()
273+ # For debug mode, swap out the "_degrade()" method with a more a verbose one.
274+ if debug:
275+ self._degrade = self._degrade_debug
276+
277+ def computeRollover(self, currentTime):
278+ """
279+ Work out the rollover time based on the specified time.
280+ """
281+
282+ result = currentTime + self.interval
283+ # If we are rolling over at midnight or weekly, then the interval is already known.
284+ # What we need to figure out is WHEN the next interval is. In other words,
285+ # if you are rolling over at midnight, then your base interval is 1 day,
286+ # but you want to start that one day clock at midnight, not now. So, we
287+ # have to fudge the rolloverAt value in order to trigger the first rollover
288+ # at the right time. After that, the regular interval will take care of
289+ # the rest. Note that this code doesn't care about leap seconds. :)
290+ if self.when == 'MIDNIGHT' or self.when.startswith('W'):
291+ # This could be done with less code, but I wanted it to be clear
292+ if self.utc:
293+ t = time.gmtime(currentTime)
294+ else:
295+ t = time.localtime(currentTime)
296+ currentHour = t[3]
297+ currentMinute = t[4]
298+ currentSecond = t[5]
299+ # r is the number of seconds left between now and midnight
300+ r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 + currentSecond)
301+ result = currentTime + r
302+ # If we are rolling over on a certain day, add in the number of days until
303+ # the next rollover, but offset by 1 since we just calculated the time
304+ # until the next day starts. There are three cases:
305+ # Case 1) The day to rollover is today; in this case, do nothing
306+ # Case 2) The day to rollover is further in the interval (i.e., today is
307+ # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
308+ # next rollover is simply 6 - 2 - 1, or 3.
309+ # Case 3) The day to rollover is behind us in the interval (i.e., today
310+ # is day 5 (Saturday) and rollover is on day 3 (Thursday).
311+ # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
312+ # number of days left in the current week (1) plus the number
313+ # of days in the next week until the rollover day (3).
314+ # The calculations described in 2) and 3) above need to have a day added.
315+ # This is because the above time calculation takes us to midnight on this
316+ # day, i.e. the start of the next day.
317+ if self.when.startswith('W'):
318+ day = t[6] # 0 is Monday
319+ if day != self.dayOfWeek:
320+ if day < self.dayOfWeek:
321+ daysToWait = self.dayOfWeek - day
322+ else:
323+ daysToWait = 6 - day + self.dayOfWeek + 1
324+ newRolloverAt = result + daysToWait * (60 * 60 * 24)
325+ if not self.utc:
326+ dstNow = t[-1]
327+ dstAtRollover = time.localtime(newRolloverAt)[-1]
328+ if dstNow != dstAtRollover:
329+ if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
330+ addend = -3600
331+ else:
332+ # DST bows out before next rollover, so we need to add an hour
333+ addend = 3600
334+ newRolloverAt += addend
335+ result = newRolloverAt
336+ return result
337+
338+ def _open_lockfile(self):
339+ # Use 'file.lock' and not 'file.log.lock' (Only handles the normal "*.log" case.)
340+ if self.baseFilename.endswith('.log'):
341+ lock_file = self.baseFilename[:-4]
342+ else:
343+ lock_file = self.baseFilename
344+ lock_file += '.lock'
345+ self.stream_lock = open(lock_file, 'w')
346+
347+ def _open(self, mode=None):
348+ """
349+ Open the current base file with the (original) mode and encoding.
350+ Return the resulting stream.
351+
352+ Note: Copied from stdlib. Added option to override 'mode'
353+ """
354+
355+ if mode is None:
356+ mode = self.mode
357+ if self.encoding is None:
358+ stream = open(self.baseFilename, mode)
359+ else:
360+ stream = codecs.open(self.baseFilename, mode, self.encoding)
361+ return stream
362+
363+ def _close(self):
364+ """ Close file stream. Unlike close(), we don't tear anything down, we
365+ expect the log to be re-opened after rotation."""
366+
367+ if self.stream:
368+ try:
369+ if not self.stream.closed:
370+ # Flushing probably isn't technically necessary, but it feels right
371+ self.stream.flush()
372+ self.stream.close()
373+ finally:
374+ self.stream = None
375+
376+ def acquire(self):
377+ """ Acquire thread and file locks. Re-opening log for 'degraded' mode.
378+ """
379+
380+ # handle thread lock
381+ Handler.acquire(self)
382+ # Issue a file lock. (This is inefficient for multiple active threads
383+ # within a single process. But if you're worried about high-performance,
384+ # you probably aren't using this log handler.)
385+ if self.stream_lock:
386+ # If stream_lock=None, then assume close() was called or something
387+ # else weird and ignore all file-level locks.
388+ if self.stream_lock.closed:
389+ # Daemonization can close all open file descriptors, see
390+ # https://bugzilla.redhat.com/show_bug.cgi?id=952929
391+ # Try opening the lock file again. Should we warn() here?!?
392+ try:
393+ self._open_lockfile()
394+ except Exception:
395+ self.handleError(NullLogRecord())
396+ # Don't try to open the stream lock again
397+ self.stream_lock = None
398+ return
399+ lock(self.stream_lock, LOCK_EX)
400+
401+ # Stream will be opened as needed by FileHandler.emit()
402+
403+ def release(self):
404+ """ Release file and thread locks. If in 'degraded' mode, close the
405+ stream to reduce contention until the log files can be rotated. """
406+
407+ try:
408+ if self._rotateFailed:
409+ self._close()
410+ except Exception:
411+ self.handleError(NullLogRecord())
412+ finally:
413+ try:
414+ if self.stream_lock and not self.stream_lock.closed:
415+ unlock(self.stream_lock)
416+ except Exception:
417+ self.handleError(NullLogRecord())
418+ finally:
419+ # release thread lock
420+ Handler.release(self)
421+
422+ def close(self):
423+ """
424+ Close log stream and stream_lock. """
425+
426+ try:
427+ self._close()
428+ if not self.stream_lock.closed:
429+ self.stream_lock.close()
430+ finally:
431+ self.stream_lock = None
432+ Handler.close(self)
433+
434+ def _degrade(self, degrade, msg, *args):
435+ """ Set degrade mode or not. Ignore msg. """
436+
437+ self._rotateFailed = degrade
438+ del msg # avoid pychecker warnings
439+ del args
440+
441+ def _degrade_debug(self, degrade, msg, *args):
442+ """ A more colorful version of _degrade(). (This is enabled by passing
443+ "debug=True" at initialization).
444+ """
445+
446+ if degrade:
447+ if not self._rotateFailed:
448+ sys.stderr.write('Degrade mode - ENTERING - (pid=%d) %s\n' % (os.getpid(), msg % args))
449+ self._rotateFailed = True
450+ else:
451+ if self._rotateFailed:
452+ sys.stderr.write('Degrade mode - EXITING - (pid=%d) %s\n' % (os.getpid(), msg % args))
453+ self._rotateFailed = False
454+
455+ def _next_rotation_filename(self, currentTime=None):
456+ # get the time that this sequence started at and make it a TimeTuple
457+ currentTime = currentTime or int(time.time())
458+ dstNow = time.localtime(currentTime)[-1]
459+ t = self.rolloverAt - self.interval
460+ if self.utc:
461+ timeTuple = time.gmtime(t)
462+ else:
463+ timeTuple = time.localtime(t)
464+ dstThen = timeTuple[-1]
465+ if dstNow != dstThen:
466+ if dstNow:
467+ addend = 3600
468+ else:
469+ addend = -3600
470+ timeTuple = time.localtime(t + addend)
471+ dfn = self.baseFilename + '.' + time.strftime(self.suffix, timeTuple)
472+ return dfn
473+
474+ def _update_rollover_time(self, currentTime=None):
475+ currentTime = currentTime or int(time.time())
476+ dstNow = time.localtime(currentTime)[-1]
477+
478+ newRolloverAt = self.computeRollover(currentTime)
479+
480+ while newRolloverAt <= currentTime:
481+ newRolloverAt += self.interval
482+
483+ # If DST changes and midnight or weekly rollover, adjust for this.
484+ if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
485+ dstAtRollover = time.localtime(newRolloverAt)[-1]
486+ if dstNow != dstAtRollover:
487+ if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
488+ addend = -3600
489+ else:
490+ # DST bows out before next rollover, so we need to add an hour
491+ addend = 3600
492+ newRolloverAt += addend
493+
494+ self.rolloverAt = newRolloverAt
495+
496+ def doRollover(self):
497+ """
498+ Do a rollover, as described in __init__().
499+ """
500+
501+ self._close()
502+ if self.backupCount < 0:
503+ # Don't keep any backups, just overwrite the existing backup file
504+ # Locking doesn't much matter here; since we are overwriting it anyway
505+ self.stream = self._open('w')
506+ return
507+
508+ try:
509+ # Determine if we can rename the log file or not. Windows refuses to
510+ # rename an open file, Unix is inode base so it doesn't care.
511+
512+ # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable
513+ tmpname = None
514+ while not tmpname or os.path.exists(tmpname):
515+ tmpname = '%s.rotate.%08d' % (self.baseFilename, randint(0, 99999999))
516+ try:
517+ # Do a rename test to determine if we can successfully rename the log file
518+ os.rename(self.baseFilename, tmpname)
519+ except (IOError, OSError):
520+ exc_value = sys.exc_info()[1]
521+ self._degrade(True, 'rename failed. File in use? exception=%s', exc_value)
522+ return
523+
524+ currentTime = int(time.time())
525+ dfn = self._next_rotation_filename(currentTime=currentTime)
526+ if os.path.exists(dfn):
527+ os.remove(dfn)
528+
529+ # Issue 18940: A file may not have been created if delay is True.
530+ if os.path.exists(tmpname):
531+ # os.rename(self.baseFilename, dfn)
532+ os.rename(tmpname, dfn)
533+ if self.backupCount > 0:
534+ for s in self.getFilesToDelete():
535+ os.remove(s)
536+
537+ self._update_rollover_time(currentTime=currentTime)
538+
539+ # print "%s -> %s" % (self.baseFilename, dfn)
540+ self._degrade(False, 'Rotation completed')
541+ finally:
542+
543+ # Re-open the output stream, but if "delay" is enabled then wait
544+ # until the next emit() call. This could reduce rename contention in
545+ # some usage patterns.
546+ if not self.delay:
547+ self.stream = self._open()
548+
549+ def shouldRollover(self, record):
550+ """
551+ Determine if rollover should occur.
552+
553+ For those that are keeping track: this differs from the standard
554+ library's TimedRotatingFileHandler in that another process may already
555+ have performed the rollover, a case which is detected and handled here.
556+ """
557+
558+ del record # avoid pychecker warnings
559+ # If the stream is not yet open, skip rollover check. (Check will occur on
560+ # next message, after emit() calls _open())
561+ if self.stream is None:
562+ return False
563+ if self._shouldRollover():
564+ # If some other process already did the rollover (which is possible
565+ # on Unix) the file behind our stream may now be named "log.2014-09-30_16-42",
566+ # thus triggering another rollover. Avoid this.
567+
568+ try:
569+ replaced = self._is_stream_rotated()
570+ if replaced:
571+ self._update_rollover_time()
572+ self._close()
573+ self.stream = self._open()
574+ return not replaced
575+ except Exception as e:
576+ self._degrade(True, 'something went very wrong: {}'.format(e))
577+ return False
578+
579+ def _shouldRollover(self):
580+ t = int(time.time())
581+ if t >= self.rolloverAt:
582+ return True
583+ # print "No need to rollover: %d, %d" % (t, self.rolloverAt)
584+ self._degrade(False, 'Rotation done or not needed at this time')
585+
586+ return False
587+
588+ def _is_stream_rotated(self):
589+ """
590+ Determine if other process already did the rollover
591+ """
592+ if os.name in ['posix', 'java']:
593+ file_object_stat = os.fstat(self.stream.fileno())
594+ file_on_disk_stat = os.stat(self.baseFilename)
595+ return not os.path.samestat(file_object_stat, file_on_disk_stat) # Availability: Unix
596+ else:
597+ # on Windows, a somewhat weaker test, but it should do the trick
598+ dfn = self._next_rotation_filename()
599+ return os.path.exists(dfn)
600+
601+ def getFilesToDelete(self):
602+ """
603+ Determine the files to delete when rolling over.
604+
605+ More specific than the earlier method, which just used glob.glob().
606+ """
607+
608+ dirName, baseName = os.path.split(self.baseFilename)
609+ fileNames = os.listdir(dirName)
610+ result = []
611+ prefix = baseName + '.'
612+ plen = len(prefix)
613+ for fileName in fileNames:
614+ if fileName[:plen] == prefix:
615+ suffix = fileName[plen:]
616+ if self.extMatch.match(suffix):
617+ result.append(os.path.join(dirName, fileName))
618+ result.sort()
619+ if len(result) < self.backupCount:
620+ result = []
621+ else:
622+ result = result[:len(result) - self.backupCount]
623+ return result
624+
625+
626+# Publish this class to the "logging.handlers" module so that it can be use
627 # from a logging config file via logging.config.fileConfig().
628 import logging.handlers
629 logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler
630+logging.handlers.ConcurrentTimeRotatingFileHandler = ConcurrentTimeRotatingFileHandler
631\ No newline at end of file
632
633=== modified file 'stresstest.py'
634--- stresstest.py 2013-07-06 21:49:10 +0000
635+++ stresstest.py 2014-11-13 16:11:31 +0000
636@@ -24,7 +24,7 @@
637 ROTATE_COUNT = 5000
638
639 # local lib; for testing
640-from cloghandler import ConcurrentRotatingFileHandler
641+from cloghandler import ConcurrentRotatingFileHandler, ConcurrentTimeRotatingFileHandler
642
643 class RotateLogStressTester:
644 def __init__(self, sharedfile, uniquefile, name="LogStressTester"):
645@@ -34,14 +34,19 @@
646 self.writeLoops = 100000
647 self.rotateSize = 128 * 1024
648 self.rotateCount = ROTATE_COUNT
649+ self.time_rotation_mode = False
650+ self.rotateWhen = 'S'
651 self.random_sleep_mode = False
652 self.debug = False
653-
654+
655 def getLogHandler(self, fn):
656 """ Override this method if you want to test a different logging handler
657 class. """
658- return ConcurrentRotatingFileHandler(fn, 'a', self.rotateSize,
659- self.rotateCount, debug=self.debug)
660+ if not self.time_rotation_mode:
661+ return ConcurrentRotatingFileHandler(fn, 'a', self.rotateSize,
662+ self.rotateCount, debug=self.debug)
663+ else:
664+ return ConcurrentTimeRotatingFileHandler(fn, when=self.rotateWhen, debug=self.debug)
665 # To run the test with the standard library's RotatingFileHandler:
666 # from logging.handlers import RotatingFileHandler
667 # return RotatingFileHandler(fn, 'a', self.rotateSize, self.rotateCount)
668@@ -96,11 +101,20 @@
669 # break
670 self.log.info("Done witting random log messages.")
671
672-def iter_lognames(logfile, count):
673+def iter_lognames(logfile, count, time_rotation=False):
674 """ Generator for log file names based on a rotation scheme """
675- for i in range(count -1, 0, -1):
676- yield "%s.%d" % (logfile, i)
677- yield logfile
678+
679+ if time_rotation:
680+ for (root, dirs, files) in os.walk(os.path.dirname(logfile), topdown=True):
681+ for fn in sorted(files, reverse=True):
682+ if str(fn).startswith(os.path.basename(logfile)):
683+ # print "I'm going to yield: ", os.path.join(root, fn)
684+ yield os.path.join(root, fn)
685+ break
686+ else:
687+ for i in range(count -1, 0, -1):
688+ yield "%s.%d" % (logfile, i)
689+ yield logfile
690
691 def iter_logs(iterable, missing_ok=False):
692 """ Generator to extract log entries from shared log file. """
693@@ -126,12 +140,18 @@
694 description="Stress test the cloghandler module.")
695 parser.add_option("--log-calls", metavar="NUM",
696 action="store", type="int", default=50000,
697- help="Number of logging entries to write to each log file. "
698+ help="Total Number of entries to be logged by each process. "
699 "Default is %default")
700+parser.add_option("--log-when", metavar="WHEN",
701+ action="store", type="str", default='S',
702+ help="Perform time-based file rotation. Default is 'S' (rotate every second). "
703+ "This option has no effect unless --time-rotation-mode is also passed.")
704 parser.add_option("--random-sleep-mode",
705 action="store_true", default=False)
706 parser.add_option("--debug",
707 action="store_true", default=False)
708+parser.add_option("--time-rotation-mode",
709+ action="store_true", default=False)
710
711
712 def main_client(args):
713@@ -147,6 +167,8 @@
714 tester.random_sleep_mode = options.random_sleep_mode
715 tester.debug = options.debug
716 tester.writeLoops = options.log_calls
717+ tester.rotateWhen = options.log_when
718+ tester.time_rotation_mode = options.time_rotation_mode
719 tester.start()
720 print("We are done pid=%d" % os.getpid())
721
722@@ -207,7 +229,7 @@
723 action="store", type="int", default=3,
724 help="Number of processes to spawn. Default: %default")
725 parser.add_option("--delay", metavar="secs",
726- action="store", type="float", default=2.5,
727+ action="store", type="float", default=0.2,
728 help="Wait SECS before spawning next processes. "
729 "Default: %default")
730 parser.add_option("-p", "--path", metavar="DIR",
731@@ -220,18 +242,24 @@
732 options.path = os.path.abspath(options.path)
733 if not os.path.isdir(options.path):
734 os.makedirs(options.path)
735+ #TODO: cleanup test directory
736
737 manager = TestManager()
738 shared = os.path.join(options.path, "shared.log")
739 for client_id in range(options.processes):
740 client = os.path.join(options.path, "client.log_client%s.log" % client_id)
741 cmdline = [ sys.executable, this_script, "client", shared, client,
742- "--log-calls=%d" % options.log_calls ]
743+ "--log-calls=%d" % options.log_calls,
744+ "--log-when=%s" % options.log_when]
745+ if options.time_rotation_mode:
746+ cmdline.append("--time-rotation-mode")
747 if options.random_sleep_mode:
748 cmdline.append("--random-sleep-mode")
749 if options.debug:
750 cmdline.append("--debug")
751-
752+
753+ print 'main_runner is going to execute cmdline: {}\n'.format(cmdline)
754+
755 child = manager.launchPopen(cmdline)
756 child.update(sharedfile=shared, clientfile=client)
757 sleep(options.delay)
758@@ -261,7 +289,8 @@
759 print("done.")
760
761 print("Writing out combined shared logs...")
762- shared_log_files = iter_lognames(shared, ROTATE_COUNT)
763+
764+ shared_log_files = iter_lognames(shared, ROTATE_COUNT, time_rotation=bool(options.log_when))
765 log_lines = iter_logs(shared_log_files, missing_ok=True)
766 combine_logs(shared_combo, sort_em(log_lines))
767 print("done.")

Subscribers

People subscribed via source and target branches

to all changes: