Commit fe1edd81 authored by Tim Peters's avatar Tim Peters

Merge r25929 from 3.3 branch.

Collector 1327:  FileStorage init confused by time travel

If the system clock moved back in time more than 30 days between
a FileStorage close and (re)open, new txn ids could be smaller
than txn ids already in the database.  Plugged that hole.
parent 225de4d2
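(Illustration, not part of the commit.) The failure mode is easy to see in a few lines: FileStorage derives each transaction id from a TimeStamp built out of the current GMT time, and tids compare like ordinary values, so a commit made after the clock has jumped backwards gets a smaller tid than one already on disk. A minimal sketch, assuming the persistent.TimeStamp API used elsewhere in this commit (the names stamp, ts_last_commit, and ts_after_rollback are illustrative only):

import time
from persistent.TimeStamp import TimeStamp

def stamp(t):
    # Build a timestamp the same way FileStorage does from a clock reading.
    return TimeStamp(*time.gmtime(t)[:5] + (t % 60,))

now = time.time()
ts_last_commit = stamp(now)             # tid recorded before the clock jumped
ts_after_rollback = stamp(now - 3600)   # first tid after the clock lost an hour

# The newer transaction would get the smaller timestamp, so its tid sorts
# before a tid already in the database -- the violated invariant.
print ts_after_rollback < ts_last_commit    # True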
What's new in ZODB3 3.3
=======================
Release date: DD-MMM-2004
Storages
--------
Collector #1327: FileStorage init confused by time travel.
If the system clock "went backwards" a long time between the times a
FileStorage was closed and reopened, new transaction ids could be
smaller than transaction ids already in the storage, violating a
key invariant. Now transaction ids are guaranteed to be increasing
even when this happens. If time appears to have run backwards at all
when a FileStorage is opened, a new message saying so is logged at
warning level; if time appears to have run backwards at least 30
minutes, the message is logged at critical level (and you should
investigate to find and repair the true cause).
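Roughly speaking, the increasing-tid guarantee comes from never trusting the clock alone: the next transaction id is taken from the current time, but pushed past the largest timestamp already seen using TimeStamp.laterThan(). A simplified sketch of that rule (not the exact ZODB code; the bookkeeping of the remembered timestamp is condensed into one hypothetical helper):

import time
from persistent.TimeStamp import TimeStamp

def next_tid(last_ts):
    # Start from the current clock reading ...
    now = time.time()
    t = TimeStamp(*time.gmtime(now)[:5] + (now % 60,))
    # ... but never go backwards: laterThan() returns a timestamp strictly
    # greater than last_ts, even if the system clock has run backwards.
    if last_ts is not None:
        t = t.laterThan(last_ts)
    return t

Under this rule a clock that has run backwards merely slows the tid clock down (each new tid is a hair later than the previous one); the warning or critical log message exists so an administrator notices and repairs the real clock.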
What's new in ZODB3 3.3 beta 1
==============================
Release date: 07-Jun-2004
......
@@ -191,10 +191,12 @@ class FileStorage(BaseStorage.BaseStorage,
         t = time.time()
         t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
         if tid > t:
-            logger.warning("%s Database records in the future", file_name);
-            if tid.timeTime() - t.timeTime() > 86400*30:
-                # a month in the future? This is bogus, use current time
-                self._ts = t
+            seconds = tid.timeTime() - t.timeTime()
+            complainer = logger.warning
+            if seconds > 30 * 60:  # 30 minutes -- way screwed up
+                complainer = logger.critical
+            complainer("%s Database records %d seconds in the future",
+                       file_name, seconds)

         self._quota = quota
......
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Support for testing logging code
If you want to test that your code generates proper log output, you
can create and install a handler that collects output:
>>> handler = InstalledHandler('foo.bar')
The handler is installed into loggers for all of the names passed. In
addition, the logger level is set to 1, which means log
everything. If you want to log less than everything, you can provide a
level keyword argument. The level setting affects only the named
loggers.
Then, any log output is collected in the handler:
>>> logging.getLogger('foo.bar').exception('eek')
>>> logging.getLogger('foo.bar').info('blah blah')
>>> for record in handler.records:
... print record.name, record.levelname
... print ' ', record.getMessage()
foo.bar ERROR
eek
foo.bar INFO
blah blah
A similar effect can be achieved by just printing the handler:
>>> print handler
foo.bar ERROR
eek
foo.bar INFO
blah blah
After checking the log output, you need to uninstall the handler:
>>> handler.uninstall()
At which point, the handler won't get any more log output.
Let's clear the handler:
>>> handler.clear()
>>> handler.records
[]
And then log something:
>>> logging.getLogger('foo.bar').info('blah')
and, sure enough, we still have no output:
>>> handler.records
[]
$Id$
"""
import logging

class Handler(logging.Handler):

    def __init__(self, *names, **kw):
        logging.Handler.__init__(self)
        self.names = names
        self.records = []
        self.setLoggerLevel(**kw)

    def setLoggerLevel(self, level=1):
        self.level = level
        self.oldlevels = {}

    def emit(self, record):
        self.records.append(record)

    def clear(self):
        del self.records[:]

    def install(self):
        for name in self.names:
            logger = logging.getLogger(name)
            self.oldlevels[name] = logger.level
            logger.setLevel(self.level)
            logger.addHandler(self)

    def uninstall(self):
        for name in self.names:
            logger = logging.getLogger(name)
            logger.setLevel(self.oldlevels[name])
            logger.removeHandler(self)

    def __str__(self):
        return '\n'.join(
            [("%s %s\n  %s" %
              (record.name, record.levelname,
               '\n'.join([line
                          for line in record.getMessage().split('\n')
                          if line.strip()])
               )
              )
             for record in self.records]
            )

class InstalledHandler(Handler):

    def __init__(self, *names):
        Handler.__init__(self, *names)
        self.install()
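For example, the handler fits naturally into setUp/tearDown of an ordinary unittest test case (a hypothetical test class, not part of this module):

import logging
import unittest

from ZODB.tests.loggingsupport import InstalledHandler

class LoggingExample(unittest.TestCase):

    def setUp(self):
        # Collect everything logged under 'foo.bar' for the duration of a test.
        self.handler = InstalledHandler('foo.bar')

    def tearDown(self):
        # Restore the previous logger levels and stop collecting records.
        self.handler.uninstall()

    def testWarningIsLogged(self):
        logging.getLogger('foo.bar').warning('uh oh')
        self.assertEqual([r.getMessage() for r in self.handler.records],
                         ['uh oh'])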
@@ -205,12 +205,109 @@ class SlowFileStorageTest(BaseFileStorageTests):
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
# Raise an exception if the tids in FileStorage fs aren't
# strictly increasing.
def checkIncreasingTids(fs):
    lasttid = '\0' * 8
    for txn in fs.iterator():
        if lasttid >= txn.tid:
            raise ValueError("tids out of order %r >= %r" % (lasttid, txn.tid))
        lasttid = txn.tid
# Return a TimeStamp object 'minutes' minutes in the future.
def timestamp(minutes):
    import time
    from persistent.TimeStamp import TimeStamp
    t = time.time() + 60 * minutes
    return TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
def testTimeTravelOnOpen():
"""
>>> from ZODB.FileStorage import FileStorage
>>> from ZODB.DB import DB
>>> import transaction
>>> from ZODB.tests.loggingsupport import InstalledHandler
Arrange to capture log messages -- they're an important part of
this test!
>>> handler = InstalledHandler('ZODB.FileStorage')
Create a new file storage.
>>> st = FileStorage('temp.fs', create=True)
>>> db = DB(st)
>>> db.close()
First check the normal case: transactions are recorded with
increasing tids, and time doesn't run backwards.
>>> st = FileStorage('temp.fs')
>>> db = DB(st)
>>> conn = db.open()
>>> conn.root()['xyz'] = 1
>>> transaction.get().commit()
>>> checkIncreasingTids(st)
>>> db.close()
>>> st.cleanup() # remove .fs, .index, etc files
>>> handler.records # i.e., no log messages
[]
Now force the database to have transaction records with tids from
the future.
>>> st = FileStorage('temp.fs', create=True)
>>> st._ts = timestamp(15) # 15 minutes in the future
>>> db = DB(st)
>>> db.close()
>>> st = FileStorage('temp.fs') # this should log a warning
>>> db = DB(st)
>>> conn = db.open()
>>> conn.root()['xyz'] = 1
>>> transaction.get().commit()
>>> checkIncreasingTids(st)
>>> db.close()
>>> st.cleanup()
>>> [record.levelname for record in handler.records]
['WARNING']
>>> handler.clear()
And one more time, with transaction records far in the future.
We expect to log a critical error then, as a time so far in the
future probably indicates a real problem with the system. Shorter
spans may be due to clock drift.
>>> st = FileStorage('temp.fs', create=True)
>>> st._ts = timestamp(60) # an hour in the future
>>> db = DB(st)
>>> db.close()
>>> st = FileStorage('temp.fs') # this should log a critical error
>>> db = DB(st)
>>> conn = db.open()
>>> conn.root()['xyz'] = 1
>>> transaction.get().commit()
>>> checkIncreasingTids(st)
>>> db.close()
>>> st.cleanup()
>>> [record.levelname for record in handler.records]
['CRITICAL']
>>> handler.clear()
>>> handler.uninstall()
"""
def test_suite():
    import doctest
    suite = unittest.TestSuite()
    for klass in [FileStorageTests, Corruption.FileStorageCorruptTests,
                  FileStorageRecoveryTest, SlowFileStorageTest]:
        suite.addTest(unittest.makeSuite(klass, "check"))
    suite.addTest(doctest.DocTestSuite())
    return suite

if __name__=='__main__':
......