Commit addec006 authored by Jim Fulton

Merge branch 'load-calls-loadBefore' of github.com:zopefoundation/ZODB into load-calls-loadBefore

Conflicts:
	src/ZODB/utils.py
parents e8a853a3 7ab793d2
......@@ -2,7 +2,12 @@
Change History
================
4.3.0 (unreleased)
4.3.1 (2016-06-06)
==================
- Fixed: FileStorage loadBefore didn't handle deleted/undone data correctly.
4.3.0 (2016-05-31)
==================
- Drop support for Python 2.6 and 3.2.
......@@ -29,7 +34,7 @@
- DemoStorage: add support for conflict resolution and fix history()
https://github.com/zopefoundation/ZODB/pull/58
- Fixed: FileStorage loadBefore didn't handle deleted/undone data correctly.
- Fixed a test that depended on implementation-specific behavior in tpc_finish
4.2.0 (2015-06-02)
==================
......
......@@ -20,13 +20,12 @@ to application logic. ZODB includes features such as a pluggable storage
interface, rich transaction support, and undo.
"""
version = "4.3.0.dev0"
version = "5.0.dev0"
import os
from setuptools import setup, find_packages
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Zope Public License
Programming Language :: Python
......@@ -131,7 +130,7 @@ setup(name="ZODB",
license = "ZPL 2.1",
platforms = ["any"],
description = doclines[0],
classifiers = filter(None, classifiers.split("\n")),
classifiers = list(filter(None, classifiers.split("\n"))),
long_description = long_description,
test_suite="__main__.alltests", # to support "setup.py test"
tests_require = tests_require,
......@@ -142,7 +141,7 @@ setup(name="ZODB",
'persistent >= 4.2.0',
'BTrees >= 4.2.0',
'ZConfig',
'transaction >= 1.5.0',
'transaction >= 1.6.1',
'six',
'zc.lockfile',
'zope.interface',
......
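The earlier classifiers change (wrapping filter() in list()) matters on Python 3, where filter() returns a lazy iterator rather than a list; the wrapper keeps the classifiers argument a plain list on both Python 2 and 3. A minimal illustration (the classifier strings are just examples):

    classifiers = "Development Status :: 4 - Beta\nProgramming Language :: Python"
    filter(None, classifiers.split("\n"))        # Python 3: a <filter object>, not a list
    list(filter(None, classifiers.split("\n")))  # both versions: a list of classifier strings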
......@@ -401,12 +401,6 @@ class DB(object):
- `xrefs` - Boolean flag indicating whether implicit cross-database
references are allowed
"""
if isinstance(storage, six.string_types):
from ZODB import FileStorage
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
from ZODB import MappingStorage
storage = ZODB.MappingStorage.MappingStorage(**storage_args)
# Allocate lock.
x = utils.RLock()
......@@ -423,12 +417,24 @@ class DB(object):
self._historical_cache_size_bytes = historical_cache_size_bytes
# Setup storage
if isinstance(storage, six.string_types):
from ZODB import FileStorage
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
from ZODB import MappingStorage
storage = ZODB.MappingStorage.MappingStorage(**storage_args)
else:
assert not storage_args
self.storage = storage
if IMVCCStorage.providedBy(storage):
self._mvcc_storage = storage
else:
from .mvccadapter import MVCCAdapter
self._mvcc_storage = MVCCAdapter(storage)
self.references = ZODB.serialize.referencesf
try:
storage.registerDB(self)
except TypeError:
storage.registerDB(self, None) # Backward compat
if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
warnings.warn(
......@@ -438,12 +444,10 @@ class DB(object):
DeprecationWarning, 2)
storage.tpc_vote = lambda *args: None
if IMVCCStorage.providedBy(storage):
temp_storage = storage.new_instance()
else:
temp_storage = storage
temp_storage = self._mvcc_storage.new_instance()
try:
try:
temp_storage.poll_invalidations()
temp_storage.load(z64, '')
except KeyError:
# Create the database's root in the storage if it doesn't exist
......@@ -462,8 +466,7 @@ class DB(object):
temp_storage.tpc_vote(t)
temp_storage.tpc_finish(t)
finally:
if IMVCCStorage.providedBy(temp_storage):
temp_storage.release()
temp_storage.release()
# Multi-database setup.
if databases is None:
......@@ -634,16 +637,13 @@ class DB(object):
@self._connectionMap
def _(c):
if c.opened:
c.transaction_manager.abort()
# Note that this will modify our pool, but this is safe, because
# _connectionMap makes a list of the pool to iterate over
c.close()
c.transaction_manager.abort()
c.afterCompletion = c.newTransaction = c.close = noop
c._release_resources()
self.storage.close()
self._mvcc_storage.close()
del self.storage
del self._mvcc_storage
def getCacheSize(self):
return self._cache_size
......@@ -675,27 +675,6 @@ class DB(object):
def getHistoricalTimeout(self):
return self.historical_pool.timeout
def invalidate(self, tid, oids, connection=None, version=''):
"""Invalidate references to a given oid.
This is used to indicate that one of the connections has committed a
change to the object. The connection committing the change should be
passed in to prevent useless (but harmless) messages to the
connection.
"""
# Storages, esp. ZEO tests, need the version argument still. :-/
assert version==''
# Notify connections.
def inval(c):
if c is not connection:
c.invalidate(tid, oids)
self._connectionMap(inval)
def invalidateCache(self):
"""Invalidate each of the connection caches
"""
self._connectionMap(lambda c: c.invalidateCache())
transform_record_data = untransform_record_data = lambda self, data: data
def objectCount(self):
......@@ -762,8 +741,6 @@ class DB(object):
result = self.pool.pop()
assert result is not None
# open the connection.
# A good time to do some cache cleanup.
# (note we already have the lock)
self.pool.availableGC()
......@@ -1003,9 +980,9 @@ class TransactionalUndo(object):
def __init__(self, db, tids):
self._db = db
self._storage = db.storage
self._storage = getattr(
db._mvcc_storage, 'undo_instance', db._mvcc_storage.new_instance)()
self._tids = tids
self._oids = set()
def abort(self, transaction):
pass
......@@ -1015,19 +992,13 @@ class TransactionalUndo(object):
def commit(self, transaction):
for tid in self._tids:
result = self._storage.undo(tid, transaction)
if result:
self._oids.update(result[1])
self._storage.undo(tid, transaction)
def tpc_vote(self, transaction):
for oid, _ in self._storage.tpc_vote(transaction) or ():
self._oids.add(oid)
self._storage.tpc_vote(transaction)
def tpc_finish(self, transaction):
self._storage.tpc_finish(
transaction,
lambda tid: self._db.invalidate(tid, self._oids)
)
self._storage.tpc_finish(transaction)
def tpc_abort(self, transaction):
self._storage.tpc_abort(transaction)
......
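As the relocated setup code above shows, DB() still accepts a file path, None, or a storage object, and non-IMVCCStorage storages are now wrapped in an MVCCAdapter. A minimal sketch of the three forms (the file name is hypothetical):

    from ZODB.DB import DB
    from ZODB.MappingStorage import MappingStorage

    db_memory = DB(None)               # None: an in-memory MappingStorage is created
    db_file = DB('data.fs')            # a string: a FileStorage is opened at that path
    db_wrapped = DB(MappingStorage())  # an IStorage object: wrapped in an MVCCAdapter
                                       # (IMVCCStorage objects are used as-is)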
......@@ -273,8 +273,6 @@ test.
>>> conn.root()['first']['count'] += 1
>>> conn.root()['third'] = persistent.mapping.PersistentMapping()
>>> transaction.commit()
>>> len(historical_conn._invalidated)
0
>>> historical_conn.close()
Note that if you try to open an historical connection to a time in the future,
......
......@@ -201,21 +201,6 @@ class IConnection(Interface):
def isReadOnly():
"""Returns True if the storage for this connection is read only."""
def invalidate(tid, oids):
"""Notify the Connection that transaction 'tid' invalidated oids.
When the next transaction boundary is reached, objects will be
invalidated. If any of the invalidated objects are accessed by the
current transaction, the revision written before Connection.tid will be
used.
The DB calls this method, even when the Connection is closed.
Parameters:
tid: the storage-level id of the transaction that committed
oids: oids is an iterable of oids.
"""
def root():
"""Return the database root object.
......@@ -278,14 +263,6 @@ class IConnection(Interface):
If clear is True, reset the counters.
"""
def invalidateCache():
"""Invalidate the connection cache
This invalidates *all* objects in the cache. If the connection
is open, subsequent reads will fail until a new transaction
begins or until the connection is reopened.
"""
def readCurrent(obj):
"""Make sure an object being read is current
......@@ -625,19 +602,6 @@ class IStorage(Interface):
otherwise, POSKeyError is raised.
"""
# The following two methods are effectively part of the interface,
# as they are generally needed when one storage wraps
# another. This deserves some thought, and probably debate, before
# adding them.
#
# def _lock_acquire():
# """Acquire the storage lock
# """
# def _lock_release():
# """Release the storage lock
# """
def new_oid():
"""Allocate a new object id.
......@@ -675,11 +639,7 @@ class IStorage(Interface):
The passed object is a wrapper object that provides an upcall
interface to support composition.
Note that, for historical reasons, an implementation may
require a second argument, however, if required, the None will
be passed as the second argument.
Also, for historical reasons, this is called registerDB rather
Note that, for historical reasons, this is called registerDB rather
than register_wrapper.
"""
......@@ -818,7 +778,6 @@ class IStorage(Interface):
"""
class IStorageRestoreable(IStorage):
"""Copying Transactions
......@@ -1110,11 +1069,9 @@ class IMVCCStorage(IStorage):
"""
def release():
"""Release all persistent sessions used by this storage instance.
"""Release resources held by the storage instance.
After this call, the storage instance can still be used;
calling methods that use persistent sessions will cause the
persistent sessions to be reopened.
The storage instance won't be used again after this call.
"""
def poll_invalidations():
......
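To make the simplified registerDB() contract concrete, here is a hedged sketch of a wrapper storage; the class and its store() behavior are illustrative only, not part of ZODB:

    class WrapperStorage(object):
        """Hypothetical wrapper storage showing the registerDB upcall."""

        def __init__(self, base):
            self._base = base
            self._wrapper = None

        def registerDB(self, wrapper):
            # Per the updated interface, only the wrapper object is passed;
            # the historical optional second (None) argument is gone.
            self._wrapper = wrapper

        def store(self, oid, serial, data, version, transaction):
            # A wrapper might use the upcall to transform record data before
            # delegating to the storage it wraps.
            data = self._wrapper.transform_record_data(data)
            return self._base.store(oid, serial, data, version, transaction)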
"""Adapt IStorage objects to IMVCCStorage
This is a largely internal implementation of ZODB, especially DB and
Connection. It takes the MVCC implementation involving invalidations
and start time and moves it into a storage adapter. This allows ZODB
to treat RelStorage and other storages in pretty much the same way and
also simplifies the implementation of the DB and Connection classes.
"""
import zope.interface
from . import interfaces, serialize, POSException
from .utils import p64, u64, Lock
class Base(object):
_copy_methods = (
'getName', 'getSize', 'history', 'lastTransaction', 'sortKey',
'loadBlob', 'openCommittedBlobFile',
'isReadOnly', 'supportsUndo', 'undoLog', 'undoInfo',
'temporaryDirectory',
)
def __init__(self, storage):
self._storage = storage
if interfaces.IBlobStorage.providedBy(storage):
zope.interface.alsoProvides(self, interfaces.IBlobStorage)
def __getattr__(self, name):
if name in self._copy_methods:
if hasattr(self._storage, name):
m = getattr(self._storage, name)
setattr(self, name, m)
return m
raise AttributeError(name)
def __len__(self):
return len(self._storage)
class MVCCAdapter(Base):
def __init__(self, storage):
Base.__init__(self, storage)
self._instances = set()
self._lock = Lock()
if hasattr(storage, 'registerDB'):
storage.registerDB(self)
def new_instance(self):
instance = MVCCAdapterInstance(self)
with self._lock:
self._instances.add(instance)
return instance
def before_instance(self, before=None):
return HistoricalStorageAdapter(self._storage, before)
def undo_instance(self):
return UndoAdapterInstance(self)
def _release(self, instance):
with self._lock:
self._instances.remove(instance)
closed = False
def close(self):
if not self.closed:
self.closed = True
self._storage.close()
del self._instances
del self._storage
def invalidateCache(self):
with self._lock:
for instance in self._instances:
instance._invalidateCache()
def invalidate(self, transaction_id, oids, version=''):
with self._lock:
for instance in self._instances:
instance._invalidate(oids)
def _invalidate_finish(self, oids, committing_instance):
with self._lock:
for instance in self._instances:
if instance is not committing_instance:
instance._invalidate(oids)
references = serialize.referencesf
transform_record_data = untransform_record_data = lambda self, data: data
def pack(self, pack_time, referencesf):
return self._storage.pack(pack_time, referencesf)
class MVCCAdapterInstance(Base):
_copy_methods = Base._copy_methods + (
'loadSerial', 'new_oid', 'tpc_vote',
'checkCurrentSerialInTransaction', 'tpc_abort',
)
def __init__(self, base):
self._base = base
Base.__init__(self, base._storage)
self._lock = Lock()
self._invalidations = set()
self._start = None # Transaction start time
self._sync = getattr(self._storage, 'sync', lambda : None)
def release(self):
self._base._release(self)
close = release
def _invalidateCache(self):
with self._lock:
self._invalidations = None
def _invalidate(self, oids):
with self._lock:
try:
self._invalidations.update(oids)
except AttributeError:
if self._invalidations is not None:
raise
def sync(self, force=True):
if force:
self._sync()
def poll_invalidations(self):
self._start = p64(u64(self._storage.lastTransaction()) + 1)
with self._lock:
if self._invalidations is None:
self._invalidations = set()
return None
else:
result = list(self._invalidations)
self._invalidations.clear()
return result
def load(self, oid, version=''):
assert self._start is not None
r = self._storage.loadBefore(oid, self._start)
if r is None:
raise POSException.ReadConflictError(repr(oid))
return r[:2]
_modified = None # Used to keep track of oids modified within a
# transaction, so we can invalidate them later.
def tpc_begin(self, transaction):
self._storage.tpc_begin(transaction)
self._modified = set()
def store(self, oid, serial, data, version, transaction):
s = self._storage.store(oid, serial, data, version, transaction)
self._modified.add(oid)
return s
def storeBlob(self, oid, serial, data, blobfilename, version, transaction):
s = self._storage.storeBlob(
oid, serial, data, blobfilename, '', transaction)
self._modified.add(oid)
return s
def tpc_finish(self, transaction, func = lambda tid: None):
modified = self._modified
self._modified = None
def invalidate_finish(tid):
self._base._invalidate_finish(modified, self)
func(tid)
self._storage.tpc_finish(transaction, invalidate_finish)
def read_only_writer(self, *a, **kw):
raise POSException.ReadOnlyError
class HistoricalStorageAdapter(Base):
"""Adapt a storage to a historical storage
"""
_copy_methods = Base._copy_methods + (
'loadSerial', 'tpc_begin', 'tpc_finish', 'tpc_abort', 'tpc_vote',
'checkCurrentSerialInTransaction',
)
def __init__(self, storage, before=None):
Base.__init__(self, storage)
self._before = before
def isReadOnly(self):
return True
def supportsUndo(self):
return False
def release(self):
pass
close = release
def sync(self, force=True):
pass
def poll_invalidations(self):
return []
new_oid = pack = store = read_only_writer
def load(self, oid, version=''):
r = self._storage.loadBefore(oid, self._before)
if r is None:
raise POSException.POSKeyError(oid)
return r[:2]
class UndoAdapterInstance(Base):
_copy_methods = Base._copy_methods + (
'tpc_abort',
)
def __init__(self, base):
self._base = base
Base.__init__(self, base._storage)
def release(self):
pass
close = release
def tpc_begin(self, transaction):
self._storage.tpc_begin(transaction)
self._undone = set()
def undo(self, transaction_id, transaction):
result = self._storage.undo(transaction_id, transaction)
if result:
self._undone.update(result[1])
return result
def tpc_vote(self, transaction):
result = self._storage.tpc_vote(transaction)
if result:
for oid, serial in result:
self._undone.add(oid)
def tpc_finish(self, transaction, func = lambda tid: None):
def invalidate_finish(tid):
self._base._invalidate_finish(self._undone, None)
func(tid)
self._storage.tpc_finish(transaction, invalidate_finish)
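For orientation, a minimal sketch of how DB-level code drives the new adapter; the base storage choice here is just an example:

    from ZODB.MappingStorage import MappingStorage
    from ZODB.mvccadapter import MVCCAdapter

    base = MappingStorage()
    adapter = MVCCAdapter(base)        # registers itself with the base storage if it can
    instance = adapter.new_instance()  # one MVCCAdapterInstance per Connection

    instance.poll_invalidations()      # sets the instance's _start to lastTransaction() + 1
    # instance.load(oid) then answers with the revision before _start via loadBefore(),
    # raising ReadConflictError when no such revision exists.
    instance.release()                 # drops the instance from the adapter's bookkeeping
    adapter.close()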
......@@ -17,7 +17,8 @@ Any storage that supports the history() method should be able to pass
all these tests.
"""
from time import time
import sys
from time import time, sleep
from ZODB.tests.MinPO import MinPO
class HistoryStorage:
......@@ -31,6 +32,9 @@ class HistoryStorage:
self.assertRaises(KeyError,self._storage.history,oid)
revids = [None]
for data in data:
if sys.platform == 'win32':
# time.time() has a precision of 1ms on Windows.
sleep(0.002)
revids.append(self._dostore(oid, revids[-1], MinPO(data)))
revids.reverse()
del revids[-1]
......
......@@ -74,7 +74,7 @@ You can't put blobs into a database that uses a Non-Blob-Storage, though:
>>> transaction2.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
Unsupported: Storing Blobs in <ZODB.MappingStorage.MappingStorage object at ...> is not supported.
Unsupported: Storing Blobs in ...
>>> transaction2.abort()
>>> connection2.close()
......
......@@ -25,27 +25,43 @@ Make a change locally:
>>> rt = cn.root()
>>> rt['a'] = 1
Sync is called when a connection is open, as that starts a new transaction:
Sync isn't called when a connection is opened, even though that
implicitly starts a new transaction:
>>> st.sync_called
False
Sync is only called when we explicitly start a new transaction:
>>> _ = transaction.begin()
>>> st.sync_called
True
>>> st.sync_called = False
BTW, calling ``sync()`` on a connection starts a new transaction, which
causes ``sync()`` to be called on the storage:
``sync()`` is called by the Connection's ``afterCompletion()`` hook after the
commit completes.
>>> cn.sync()
>>> st.sync_called
True
>>> st.sync_called = False
``sync()`` is not called by the Connection's ``afterCompletion()``
hook after the commit completes, because we'll sync when a new
transaction begins:
>>> transaction.commit()
>>> st.sync_called # False before 3.4
True
False
``sync()`` is also called by the ``afterCompletion()`` hook after an abort.
``sync()`` is also not called by the ``afterCompletion()`` hook after an abort.
>>> st.sync_called = False
>>> rt['b'] = 2
>>> transaction.abort()
>>> st.sync_called # False before 3.4
True
False
And ``sync()`` is called whenever we explicitly start a new transaction, via
the ``newTransaction()`` hook.
......@@ -63,45 +79,14 @@ traceback then ;-)
>>> cn.close()
One more, very obscure. It was the case that if the first action a new
threaded transaction manager saw was a ``begin()`` call, then synchronizers
registered after that in the same transaction weren't communicated to the
`Transaction` object, and so the synchronizers' ``afterCompletion()`` hooks
weren't called when the transaction committed. None of the test suites
(ZODB's, Zope 2.8's, or Zope3's) caught that, but apparently Zope 3 takes this
path at some point when serving pages.
>>> tm = transaction.ThreadTransactionManager()
>>> st.sync_called = False
>>> dummy = tm.begin() # we're doing this _before_ opening a connection
>>> cn = db.open(transaction_manager=tm)
>>> rt = cn.root() # make a change
>>> rt['c'] = 3
>>> st.sync_called
True
>>> st.sync_called = False
As a special case, if a synchronizer registers while a transaction is
in flight, then newTransaction, and thus the storage sync method, is
called:
Now ensure that ``cn.afterCompletion() -> st.sync()`` gets called by commit
despite that the `Connection` registered after the transaction began:
>>> tm.commit()
>>> st.sync_called
True
And try the same thing with a non-threaded transaction manager:
>>> cn.close()
>>> tm = transaction.TransactionManager()
>>> st.sync_called = False
>>> dummy = tm.begin() # we're doing this _before_ opening a connection
>>> _ = tm.begin() # we're doing this _before_ opening a connection
>>> cn = db.open(transaction_manager=tm)
>>> rt = cn.root() # make a change
>>> rt['d'] = 4
>>> st.sync_called
True
>>> st.sync_called = False
>>> tm.commit()
>>> st.sync_called
True
......
......@@ -27,6 +27,8 @@ from persistent import Persistent
from zope.interface.verify import verifyObject
from zope.testing import loggingsupport, renormalizing
from .. import mvccadapter
checker = renormalizing.RENormalizing([
# Python 3 bytes add a "b".
(re.compile("b('.*?')"), r"\1"),
......@@ -154,7 +156,8 @@ class ConnectionDotAdd(ZODB.tests.util.TestCase):
self.datamgr.add(obj)
self.datamgr.tpc_begin(self.transaction)
self.datamgr.tpc_finish(self.transaction)
self.assertTrue(obj._p_oid not in self.datamgr._storage._stored)
self.assertTrue(obj._p_oid not in
self.datamgr._storage._storage._stored)
def test__resetCacheResetsReader(self):
# https://bugs.launchpad.net/zodb/+bug/142667
......@@ -435,8 +438,11 @@ class UserMethodTests(unittest.TestCase):
...
ConnectionStateError: The database connection is closed
>>> db.close()
An expedient way to create a read-only storage:
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> db.storage.isReadOnly = lambda: True
>>> cn = db.open()
>>> cn.isReadOnly()
......@@ -510,7 +516,9 @@ class InvalidationTests(unittest.TestCase):
they have the expected effect.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> mvcc_storage = db._mvcc_storage
>>> cn = db.open()
>>> mvcc_instance = cn._storage
>>> p1 = Persistent()
>>> p2 = Persistent()
>>> p3 = Persistent()
......@@ -521,29 +529,18 @@ class InvalidationTests(unittest.TestCase):
Transaction ids are 8-byte strings, just like oids; p64() will
create one from an int.
>>> cn.invalidate(p64(1), {p1._p_oid: 1})
>>> mvcc_storage.invalidate(p64(1), {p1._p_oid: 1})
Transaction start times are based on the storage's last
transaction. (Previously, they were based on the first
invalidation seen in a transaction.)
>>> cn._txn_time == p64(u64(db.storage.lastTransaction()) + 1)
>>> mvcc_instance.poll_invalidations() == [p1._p_oid]
True
>>> p1._p_oid in cn._invalidated
True
>>> p2._p_oid in cn._invalidated
False
>>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
>>> cn._txn_time == p64(u64(db.storage.lastTransaction()) + 1)
>>> mvcc_instance._start == p64(u64(db.storage.lastTransaction()) + 1)
True
>>> p1._p_oid in cn._invalidated
True
>>> p2._p_oid in cn._invalidated
True
>>> mvcc_storage.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
Calling invalidate() doesn't affect the object state until
a transaction boundary.
......@@ -560,24 +557,24 @@ class InvalidationTests(unittest.TestCase):
>>> cn.sync()
>>> p1._p_state
-1
0
>>> p2._p_state
-1
>>> p3._p_state
0
>>> cn._invalidated
set([])
>>> db.close()
"""
def doctest_invalidateCache():
"""The invalidateCache method invalidates a connection's cache. It also
prevents reads until the end of a transaction::
"""The invalidateCache method invalidates a connection's cache.
It also prevents reads until the end of a transaction::
>>> from ZODB.tests.util import DB
>>> import transaction
>>> db = DB()
>>> mvcc_storage = db._mvcc_storage
>>> tm = transaction.TransactionManager()
>>> connection = db.open(transaction_manager=tm)
>>> connection.root()['a'] = StubObject()
......@@ -593,53 +590,33 @@ def doctest_invalidateCache():
So we have a connection and an active transaction with some modifications.
Lets call invalidateCache:
>>> connection.invalidateCache()
Now, if we try to load an object, we'll get a read conflict:
>>> connection.root()['b'].x
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
If we try to commit the transaction, we'll get a conflict error:
>>> mvcc_storage.invalidateCache()
>>> tm.commit()
Traceback (most recent call last):
...
ConflictError: database conflict error
This won't have any effect until the next transaction:
and the cache will have been cleared:
>>> connection.root()['a']._p_changed
0
>>> connection.root()['b']._p_changed
>>> connection.root()['c']._p_changed
1
>>> print(connection.root()['a']._p_changed)
None
>>> print(connection.root()['b']._p_changed)
None
>>> print(connection.root()['c']._p_changed)
None
But if we sync():
But we'll be able to access data again:
>>> connection.sync()
>>> connection.root()['b'].x
1
All of our data was invalidated:
Aborting a transaction after a read conflict also lets us read data and go
on about our business:
>>> connection.root()['a']._p_changed
>>> connection.root()['b']._p_changed
>>> connection.root()['c']._p_changed
>>> connection.invalidateCache()
But we can load data as usual:
>>> connection.root()['c'].x
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
Now, if we try to load an object, we'll get a read conflict:
>>> tm.abort()
>>> connection.root()['c'].x
>>> connection.root()['b'].x
1
>>> connection.root()['c'].x = 2
>>> tm.commit()
>>> db.close()
"""
......@@ -1333,6 +1310,7 @@ class StubDatabase:
def __init__(self):
self.storage = StubStorage()
self._mvcc_storage = mvccadapter.MVCCAdapter(self.storage)
self.new_oid = self.storage.new_oid
classFactory = None
......
......@@ -83,34 +83,40 @@ def test_invalidateCache():
>>> from ZODB.tests.util import DB
>>> import transaction
>>> db = DB()
>>> mvcc_storage = db._mvcc_storage
>>> tm1 = transaction.TransactionManager()
>>> c1 = db.open(transaction_manager=tm1)
>>> c1.root()['a'] = MinPO(1)
>>> tm1.commit()
>>> tm2 = transaction.TransactionManager()
>>> c2 = db.open(transaction_manager=tm2)
>>> c1.root()['a']._p_deactivate()
>>> c2.root()['a'].value
1
>>> tm3 = transaction.TransactionManager()
>>> c3 = db.open(transaction_manager=tm3)
>>> c3.root()['a'].value
1
>>> c3.close()
>>> db.invalidateCache()
>>> c1.root()['a'].value
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
>>> c2.root()['a'].value
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
>>> mvcc_storage.invalidateCache()
>>> c1.root.a._p_changed
0
>>> c1.sync()
>>> c1.root.a._p_changed
>>> c2.root.a._p_changed
0
>>> c2.sync()
>>> c2.root.a._p_changed
>>> c3 is db.open(transaction_manager=tm3)
True
>>> print(c3.root()['a']._p_changed)
None
>>> c3.root.a._p_changed
>>> c1.root()['a'].value
1
>>> c2.root()['a'].value
1
>>> c3.root()['a'].value
1
>>> db.close()
"""
......
......@@ -15,6 +15,7 @@ import doctest
import os
if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
from zope.testing import doctest
import sys
import unittest
import transaction
import ZODB.FileStorage
......@@ -303,9 +304,18 @@ class FileStorageTests(
# is based on what was cached during the first load.
self.assertEqual(storage.load(z64)[0], b'foo' if fail else b'bar')
def checkFlushNeededAfterTruncate(self):
self._storage._files.flush = lambda: None
self.checkFlushAfterTruncate(True)
# We want to be sure that the above test detects any regression
# in the code it checks, because any bug here is like a time bomb: not
# obvious, hard to reproduce, with possible data corruption.
# This is all the more important because FilePool.flush() is quite aggressive,
# and we'd like to optimize it when Python gets an API to flush read buffers.
# Therefore, 'checkFlushAfterTruncate' is tested in turn by another unit
# test.
# On Windows, flushing explicitly is not (always?) necessary.
if sys.platform != 'win32':
def checkFlushNeededAfterTruncate(self):
self._storage._files.flush = lambda: None
self.checkFlushAfterTruncate(True)
class FileStorageHexTests(FileStorageTests):
......@@ -390,7 +400,7 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True)
def checkanalyze(self):
import types, sys
import types
from BTrees.OOBTree import OOBTree
from ZODB.scripts import analyze
......
......@@ -547,14 +547,13 @@ def loadblob_tmpstore():
>>> transaction.commit()
>>> blob_oid = root['blob']._p_oid
>>> tid = connection._storage.lastTransaction()
>>> _txn_time = connection._txn_time
Now we open a database with a TmpStore in front:
>>> database.close()
>>> from ZODB.Connection import TmpStore
>>> tmpstore = TmpStore(blob_storage, _txn_time)
>>> tmpstore = TmpStore(blob_storage)
We can access the blob correctly:
......
......@@ -38,7 +38,7 @@ originally written. The new approach is much simpler because we no
longer call load to get the current state of an object. We call
loadBefore instead, having gotten a transaction time at the start of a
transaction. As a result, the rhythm of the tests is a little odd,
because the probe a complex dance that doesn't exist any more.
because we no longer need to probe a complex dance that doesn't exist any more.
>>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB import DB
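A minimal sketch of that loadBefore-based read, mirroring MVCCAdapterInstance.load in the new mvccadapter module (the helper name is ours):

    from ZODB.utils import p64, u64
    from ZODB import POSException

    def load_visible(storage, oid):
        # The start time is the last committed tid + 1, captured when
        # invalidations are polled; revisions committed before it are visible.
        start = p64(u64(storage.lastTransaction()) + 1)
        r = storage.loadBefore(oid, start)
        if r is None:
            raise POSException.ReadConflictError(repr(oid))
        data, serial = r[:2]
        return data, serial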
......@@ -68,9 +68,9 @@ Now open a second connection.
>>> tm2 = transaction.TransactionManager()
>>> cn2 = db.open(transaction_manager=tm2)
>>> from ZODB.utils import p64, u64
>>> cn2._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn2._storage._start == p64(u64(st.lastTransaction()) + 1)
True
>>> txn_time2 = cn2._txn_time
>>> txn_time2 = cn2._storage._start
Connection high-water mark
--------------------------
......@@ -85,13 +85,13 @@ storage has seen.
>>> cn = db.open()
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
True
>>> cn.invalidate(100, dict.fromkeys([1, 2]))
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn.db()._mvcc_storage.invalidate(100, dict.fromkeys([1, 2]))
>>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
True
>>> cn.invalidate(200, dict.fromkeys([1, 2]))
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn.db()._mvcc_storage.invalidate(200, dict.fromkeys([1, 2]))
>>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
True
A connection's high-water mark is set to the transaction id taken from
......@@ -105,7 +105,7 @@ but that doesn't work unless an object is modified. sync() will abort
a transaction and process invalidations.
>>> cn.sync()
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
True
Basic functionality
......@@ -121,14 +121,14 @@ will modify "a." The other transaction will then modify "b" and commit.
The second connection already has its high-water mark set.
>>> cn2._txn_time == txn_time2
>>> cn2._storage._start == txn_time2
True
It is safe to read "b," because it was not modified by the concurrent
transaction.
>>> r2 = cn2.root()
>>> r2["b"]._p_serial < cn2._txn_time
>>> r2["b"]._p_serial < cn2._storage._start
True
>>> r2["b"].value
1
......@@ -140,7 +140,7 @@ non-current version.
>>> r2["a"].value
1
>>> r2["a"]._p_serial < cn2._txn_time
>>> r2["a"]._p_serial < cn2._storage._start
True
We can confirm that we have a non-current revision by asking the
......@@ -153,32 +153,32 @@ It's possible to modify "a", but we get a conflict error when we
commit the transaction.
>>> r2["a"].value = 3
>>> tm2.get().commit()
>>> tm2.get().commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO.MinPO)
ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO...
>>> tm2.get().abort()
This example will demonstrate that we can commit a transaction if we only
modify current revisions.
>>> cn2._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn2._storage._start == p64(u64(st.lastTransaction()) + 1)
True
>>> txn_time2 = cn2._txn_time
>>> txn_time2 = cn2._storage._start
>>> r1 = cn1.root()
>>> r1["a"].value = 3
>>> tm1.get().commit()
>>> txn = db.lastTransaction()
>>> cn2._txn_time == txn_time2
>>> cn2._storage._start == txn_time2
True
>>> r2["b"].value = r2["a"].value + 1
>>> r2["b"].value
3
>>> tm2.get().commit()
>>> cn2._txn_time == p64(u64(st.lastTransaction()) + 1)
>>> cn2._storage._start == p64(u64(st.lastTransaction()) + 1)
True
Object cache
......@@ -362,18 +362,18 @@ This test is kinda screwy because it depends on an old approach that
has changed. We'll hack the _txn_time to get the original expected
result, even though what's going on now is much simpler.
>>> cn1._txn_time = ts.lastTransaction()
>>> cn1._storage._start = ts.lastTransaction()
Once the oid is hooked, an invalidation will be delivered the next
time it is activated. The code below activates the object, then
confirms that the hook worked and that the old state was retrieved.
>>> oid in cn1._invalidated
>>> oid in cn1._storage._invalidations
False
>>> r1["b"]._p_state
-1
>>> r1["b"]._p_activate()
>>> oid in cn1._invalidated
>>> oid in cn1._storage._invalidations
True
>>> ts.count
1
......@@ -406,15 +406,15 @@ Again, once the oid is hooked, an invalidation will be delivered the next
time it is activated. The code below activates the object, but unlike the
section above, this is no older state to retrieve.
>>> oid in cn1._invalidated
>>> oid in cn1._storage._invalidations
False
>>> r1["b"]._p_state
-1
>>> cn1._txn_time = ts.lastTransaction()
>>> r1["b"]._p_activate()
>>> cn1._storage._start = ts.lastTransaction()
>>> r1["b"]._p_activate() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
ReadConflictError: ...
>>> db.close()
"""
......