Commit 7db4484c authored by Tim Peters's avatar Tim Peters

Merge rev 30255 from 3.4 branch.

ISynchronizer grows a newTransaction() method, called
whenever TransactionManager.begin() is called.

Connection implements that, and changes its ISynchronizer
afterCompletion() method, to call sync() on its storage
(if the storage has such a method), and to process
invalidations in any case.

The bottom line is that storage sync() will get done "by
magic" now after top-level commit() and abort(), and after
explicit TransactionManager.begin().  This should make it
possible to deprecate Connection.sync(), although I'm not
doing that yet.  Made a small but meaningful start by
purging many sync() calls from some of the nastiest ZEO
tests -- and they still work fine.
parent 82919885
......@@ -10,9 +10,6 @@ Release date: DD-MMM-2005
transaction
-----------
- A ``getBeforeCommitHooks()`` method was added. It returns an iterable
producing the registered beforeCommit hooks.
- Doing a subtransaction commit erroneously processed invalidations, which
could lead to an inconsistent view of the database. For example, let T be
the transaction of which the subtransaction commit was a part. If T read a
......@@ -29,6 +26,24 @@ transaction
could fail, and despite that T never modified O.
- A ``getBeforeCommitHooks()`` method was added. It returns an iterable
producing the registered beforeCommit hooks.
- The ``ISynchronizer`` interface has a new ``newTransaction()`` method.
This is invoked whenever a transaction manager's ``begin()`` method is
called. (Note that a transaction object's (as opposed to a transaction
manager's) ``begin()`` method is deprecated, and ``newTransaction()``
is not called when using the deprecated method.)
- Relatedly, ``Connection`` implements ``ISynchronizer``, and ``Connection``'s
``afterCompletion()`` and ``newTransaction()`` methods now call ``sync()``
on the underlying storage (if the underlying storage has such a method),
in addition to processing invalidations. The practical implication is that
storage synchronization will be done automatically now, whenever a
transaction is explicitly started, and after top-level transaction commit
or abort. As a result, ``Connection.sync()`` should virtually never be
needed anymore, and will eventually be deprecated.
What's new in ZODB3 3.4a5?
==========================
......
......@@ -640,6 +640,7 @@ class ConnectionTests(CommonSetupTearDown):
r1["a"] = MinPO("a")
transaction.commit()
self.assertEqual(r1._p_state, 0) # up-to-date
db2 = DB(self.openClientStorage())
r2 = db2.open().root()
......@@ -649,18 +650,16 @@ class ConnectionTests(CommonSetupTearDown):
r2["b"] = MinPO("b")
transaction.commit()
# make sure the invalidation is received in the other client
# Make sure the invalidation is received in the other client.
for i in range(10):
c1._storage.sync()
if c1._invalidated.has_key(r1._p_oid):
if r1._p_state == -1:
break
time.sleep(0.1)
self.assert_(c1._invalidated.has_key(r1._p_oid))
self.assertEqual(r1._p_state, -1) # ghost
# force the invalidations to be applied...
c1.sync()
r1.keys() # unghostify
self.assertEqual(r1._p_serial, r2._p_serial)
self.assertEqual(r1["b"].value, "b")
db2.close()
db1.close()
......
......@@ -88,8 +88,7 @@ class StressTask:
try:
self.tm.get().commit()
except ConflictError, msg:
self.tm.get().abort()
cn.sync()
self.tm.abort()
else:
if self.sleep:
time.sleep(self.sleep)
......@@ -152,7 +151,6 @@ class StressThread(FailableThread):
break
except (ConflictError, KeyError):
transaction.abort()
cn.sync()
key = self.startnum
while not self.stop.isSet():
try:
......@@ -164,11 +162,6 @@ class StressThread(FailableThread):
time.sleep(self.sleep)
except (ReadConflictError, ConflictError), msg:
transaction.abort()
# sync() is necessary here to process invalidations
# if we get a read conflict. In the read conflict case,
# no objects were modified so cn never got registered
# with the transaction.
cn.sync()
else:
self.added_keys.append(key)
key += self.step
......@@ -201,7 +194,6 @@ class LargeUpdatesThread(FailableThread):
except (ConflictError, KeyError):
# print "%d getting tree abort" % self.threadnum
transaction.abort()
cn.sync()
keys_added = {} # set of keys we commit
tkeys = []
......@@ -223,7 +215,6 @@ class LargeUpdatesThread(FailableThread):
except (ReadConflictError, ConflictError), msg:
# print "%d setting key %s" % (self.threadnum, msg)
transaction.abort()
cn.sync()
break
else:
# print "%d set #%d" % (self.threadnum, len(keys))
......@@ -236,16 +227,10 @@ class LargeUpdatesThread(FailableThread):
except ConflictError, msg:
# print "%d commit %s" % (self.threadnum, msg)
transaction.abort()
cn.sync()
continue
for k in keys:
tkeys.remove(k)
keys_added[k] = 1
# sync() is necessary here to process invalidations
# if we get a read conflict. In the read conflict case,
# no objects were modified so cn never got registered
# with the transaction.
cn.sync()
self.added_keys = keys_added.keys()
cn.close()
......@@ -287,7 +272,6 @@ class VersionStressThread(FailableThread):
break
except (ConflictError, KeyError):
transaction.abort()
cn.sync()
while not self.stop.isSet():
try:
tree[key] = self.threadnum
......@@ -297,11 +281,6 @@ class VersionStressThread(FailableThread):
break
except (VersionLockError, ReadConflictError, ConflictError), msg:
transaction.abort()
# sync() is necessary here to process invalidations
# if we get a read conflict. In the read conflict case,
# no objects were modified so cn never got registered
# with the transaction.
cn.sync()
if self.sleep:
time.sleep(self.sleep)
try:
......@@ -319,7 +298,6 @@ class VersionStressThread(FailableThread):
return commit
except ConflictError, msg:
transaction.abort()
cn.sync()
finally:
cn.close()
return 0
......@@ -351,7 +329,6 @@ class InvalidationTests:
except ReadConflictError:
if retries:
transaction.abort()
cn.sync()
else:
raise
except:
......
......@@ -27,7 +27,9 @@ from persistent import PickleCache
# interfaces
from persistent.interfaces import IPersistentDataManager
from ZODB.interfaces import IConnection
from transaction.interfaces import ISavepointDataManager, IDataManagerSavepoint
from transaction.interfaces import ISavepointDataManager
from transaction.interfaces import IDataManagerSavepoint
from transaction.interfaces import ISynchronizer
from zope.interface import implements
import transaction
......@@ -59,7 +61,10 @@ def resetCaches():
class Connection(ExportImport, object):
"""Connection to ZODB for loading and storing objects."""
implements(IConnection, ISavepointDataManager, IPersistentDataManager)
implements(IConnection,
ISavepointDataManager,
IPersistentDataManager,
ISynchronizer)
_storage = _normal_storage = _savepoint_storage = None
......@@ -291,11 +296,8 @@ class Connection(ExportImport, object):
def sync(self):
"""Manually update the view on the database."""
self._txn_mgr.get().abort()
sync = getattr(self._storage, 'sync', 0)
if sync:
sync()
self._flush_invalidations()
self._txn_mgr.abort()
self._storage_sync()
def getDebugInfo(self):
"""Returns a tuple with different items for debugging the
......@@ -379,6 +381,7 @@ class Connection(ExportImport, object):
self._needs_to_join = True
self._registered_objects = []
# Process pending invalidations.
def _flush_invalidations(self):
self._inv_lock.acquire()
try:
......@@ -650,10 +653,19 @@ class Connection(ExportImport, object):
# We don't do anything before a commit starts.
pass
def afterCompletion(self, txn):
# Call the underlying storage's sync() method (if any), and process
# pending invalidations regardless.  Of course this should only be
# called at transaction boundaries.
def _storage_sync(self, *ignored):
    """Sync the underlying storage, then flush pending invalidations.

    If the storage has a sync() method it is called first; invalidations
    are processed in any case.  The *ignored varargs let this one method
    serve as both ISynchronizer hooks below, which are passed the
    transaction object (irrelevant here).
    """
    # getattr default of 0 is falsy, so storages without sync() skip the call.
    sync = getattr(self._storage, 'sync', 0)
    if sync:
        sync()
    self._flush_invalidations()

# Transaction-manager synchronization -- ISynchronizer
afterCompletion = _storage_sync
newTransaction = _storage_sync
# Transaction-manager synchronization -- ISynchronizer
##########################################################################
##########################################################################
......
......@@ -279,8 +279,8 @@ class IConnection(Interface):
"""Manually update the view on the database.
This includes aborting the current transaction, getting a fresh and
consistent view of the data (synchronizing with the storage if possible)
and call cacheGC() for this connection.
consistent view of the data (synchronizing with the storage if
possible) and calling cacheGC() for this connection.
This method was especially useful in ZODB 3.2 to better support
read-only connections that were affected by a couple of problems.
......
Here are some tests that storage sync() methods get called at appropriate
times in the life of a transaction. The tested behavior is new in ZODB 3.4.
First define a lightweight storage with a sync() method:
>>> import ZODB
>>> from ZODB.MappingStorage import MappingStorage
>>> import transaction
>>> class SimpleStorage(MappingStorage):
... sync_called = False
...
... def sync(self, *args):
... self.sync_called = True
Make a change locally:
>>> st = SimpleStorage()
>>> db = ZODB.DB(st)
>>> cn = db.open()
>>> rt = cn.root()
>>> rt['a'] = 1
Sync should not have been called yet.
>>> st.sync_called # False before 3.4
False
sync is called by the Connection's afterCompletion() hook after the commit
completes.
>>> transaction.commit()
>>> st.sync_called # False before 3.4
True
sync is also called by the afterCompletion() hook after an abort.
>>> st.sync_called = False
>>> rt['b'] = 2
>>> transaction.abort()
>>> st.sync_called # False before 3.4
True
And sync is called whenever we explicitly start a new txn, via the
newTransaction() hook.
>>> st.sync_called = False
>>> dummy = transaction.begin()
>>> st.sync_called # False before 3.4
True
Clean up. Closing db isn't enough -- closing a DB doesn't close its
Connections. Leaving our Connection open here can cause the
SimpleStorage.sync() method to get called later, during another test, and
our doctest-synthesized module globals no longer exist then. You get
a weird traceback then ;-)
>>> cn.close()
>>> db.close()
......@@ -21,19 +21,18 @@ import persistent.dict, transaction
def testAddingThenModifyThenAbort():
"""\
We ran into a problem in which abort failed after adding an object in
a savepoint and then modifying the object. The problem was that, on
a savepoint and then modifying the object. The problem was that, on
commit, the savepoint was aborted before the modifications were
aborted. Because the object was added in the savepoint, it's _p_oid
aborted. Because the object was added in the savepoint, its _p_oid
and _p_jar were cleared when the savepoint was aborted. The object
was in the registered-object list. There's an invariant for this
lists that states that all objects in the list should have an oid and
list that states that all objects in the list should have an oid and
(correct) jar.
The fix was to abort work done after he savepoint before aborting the
The fix was to abort work done after the savepoint before aborting the
savepoint.
>>> import ZODB.tests.util
>>> db = ZODB.tests.util.DB()
>>> connection = db.open()
......@@ -44,21 +43,19 @@ savepoint.
>>> sp = transaction.savepoint()
>>> ob.x = 1
>>> transaction.abort()
"""
def testModifyThenSavePointThenModifySomeMoreThenCommit():
"""\
We got conflict errors when we committed after we modified an object
in a savepoint and then modified it some more after the last
in a savepoint, and then modified it some more after the last
savepoint.
The problem was that we were effectively commiting the object twice --
when commiting the current data and when committing the savepoint.
The fix was to first make a new savepoint to move new changes to the
savepoint storage and *then* to commit the savepoint storage. (This is
similar to thr strategy that was used for subtransactions prior to
similar to the strategy that was used for subtransactions prior to
savepoints.)
......@@ -71,7 +68,6 @@ savepoints.)
>>> sp = transaction.savepoint()
>>> root['a'] = 2
>>> transaction.commit()
"""
def test_suite():
......@@ -82,4 +78,3 @@ def test_suite():
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
......@@ -44,7 +44,7 @@ It allows deposits and debits to be entered for multiple people.
It accepts a sequence of entries and generates a sequence of status
messages. For each entry, it applies the change and then validates
the user's account. If the user's account is invalid, we roll back
the change for that entry. The success or failure of an entry is
the change for that entry. The success or failure of an entry is
indicated in the output status. First we'll initialize some accounts:
>>> root['bob-balance'] = 0.0
......@@ -60,7 +60,7 @@ Now, we'll define a validation function to validate an account:
... raise ValueError('Overdrawn', name)
And a function to apply entries. If the function fails in some
unexpected way, it rolls back all of its changes and
unexpected way, it rolls back all of its changes and
prints the error:
>>> def apply_entries(entries):
......@@ -102,7 +102,7 @@ Now let's try applying some entries:
>>> root['sally-balance']
-80.0
If we provide entries that cause an unexpected error:
>>> apply_entries([
......@@ -115,7 +115,7 @@ If we give provide entries that cause an unexpected error:
Updated sally
Unexpected exception unsupported operand type(s) for +=: 'float' and 'str'
Because the apply_entries used a savepoint for the entire function,
Because the apply_entries used a savepoint for the entire function,
it was able to rollback the partial changes without rolling back
changes made in the previous call to apply_entries:
......
......@@ -17,4 +17,5 @@ from zope.testing.doctestunit import DocFileSuite
def test_suite():
return DocFileSuite("dbopen.txt",
"multidb.txt",
"synchronizers.txt",
)
......@@ -32,6 +32,16 @@ from transaction._transaction import Transaction
# Obscure: because of the __init__.py maze, we can't import WeakSet
# at top level here.
# Call the ISynchronizer newTransaction() method on every element of
# WeakSet synchs.
# A transaction manager needs to do this whenever begin() is called.
# Since it would be good if tm.get() returned the new transaction while
# newTransaction() is running, calling this has to be delayed until after
# the transaction manager has done whatever it needs to do to make its
# get() return the new txn.
def _new_transaction(txn, synchs):
synchs.map(lambda s: s.newTransaction(txn))
class TransactionManager(object):
def __init__(self):
......@@ -43,8 +53,9 @@ class TransactionManager(object):
def begin(self):
if self._txn is not None:
self._txn.abort()
self._txn = Transaction(self._synchs, self)
return self._txn
txn = self._txn = Transaction(self._synchs, self)
_new_transaction(txn, self._synchs)
return txn
def get(self):
if self._txn is None:
......@@ -91,6 +102,7 @@ class ThreadTransactionManager(TransactionManager):
txn.abort()
synchs = self._synchs.get(tid)
txn = self._txns[tid] = Transaction(synchs, self)
_new_transaction(txn, synchs)
return txn
def get(self):
......
......@@ -28,6 +28,9 @@ class ITransactionManager(zope.interface.Interface):
"""Begin a new transaction.
If an existing transaction is in progress, it will be aborted.
The newTransaction() method of registered synchronizers is called,
passing the new transaction object.
"""
def get():
......@@ -55,15 +58,15 @@ class ITransactionManager(zope.interface.Interface):
def registerSynch(synch):
"""Register an ISynchronizer.
Synchronizers are notified at the beginning and end of
transaction completion.
Synchronizers are notified about some major events in a transaction's
life. See ISynchronizer for details.
"""
def unregisterSynch(synch):
"""Unregister an ISynchronizer.
Synchronizers are notified at the beginning and end of
transaction completion.
Synchronizers are notified about some major events in a transaction's
life. See ISynchronizer for details.
"""
class ITransaction(zope.interface.Interface):
......@@ -365,3 +368,10 @@ class ISynchronizer(zope.interface.Interface):
def afterCompletion(transaction):
"""Hook that is called by the transaction after completing a commit.
"""
def newTransaction(transaction):
"""Hook that is called at the start of a transaction.
This hook is called when, and only when, a transaction manager's
begin() method is called explicitly.
"""
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment