Commit e49710b4 authored by Barry Warsaw's avatar Barry Warsaw

Fixes to close a pack-related race condition. If new objects were

added while a pack was stuck between a mark and a sweep, the mark
would not have found the new objects as root reachable (because they
wouldn't have been stored yet), but the sweep will find them and
erroneously delete them.

_setupDBs(): Add a _packing flag which gets set whenever we're doing a
classic or auto pack

_docommit(): When the _packing flag is set, copy all oids in the _oids
table to the _packmark table, so any objects added between the mark
and sweep phases will not be gc'd.  This is fine because if they're
still not root reachable by the next gc pass, they'll get collected
then.

pack(): Set and reset the _packing flag around the calls to _dopack().

Also, get ZERO from the package, and add the `info' table for storage
metadata.

_docommit(): Fix a reference counting bug caused by not incref'ing the
objects referred to by the new pickle of a new object revision.
parent 80e75665
############################################################################## ##############################################################################
# #
# Copyright (c) 2001, 2002 Zope Corporation and Contributors. # Copyright (c) 2001 Zope Corporation and Contributors.
# All Rights Reserved. # All Rights Reserved.
# #
# This software is subject to the provisions of the Zope Public License, # This software is subject to the provisions of the Zope Public License,
...@@ -15,20 +15,19 @@ ...@@ -15,20 +15,19 @@
"""Berkeley storage without undo or versioning. """Berkeley storage without undo or versioning.
""" """
__version__ = '$Revision: 1.24 $'[-2:][0] __version__ = '$Revision: 1.25 $'[-2:][0]
from ZODB import POSException from ZODB import POSException
from ZODB.utils import p64, U64 from ZODB.utils import p64, U64
from ZODB.referencesf import referencesf from ZODB.referencesf import referencesf
from ZODB.ConflictResolution import ConflictResolvingStorage, ResolvedSerial from ZODB.ConflictResolution import ConflictResolvingStorage, ResolvedSerial
from BDBStorage import db from BDBStorage import db, ZERO
from BerkeleyBase import BerkeleyBase, PackStop, _WorkThread from BerkeleyBase import BerkeleyBase, PackStop, _WorkThread
ABORT = 'A' ABORT = 'A'
COMMIT = 'C' COMMIT = 'C'
PRESENT = 'X' PRESENT = 'X'
ZERO = '\0'*8
try: try:
True, False True, False
...@@ -78,6 +77,13 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage): ...@@ -78,6 +77,13 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage):
# no pending entry. It is a database invariant that if the # no pending entry. It is a database invariant that if the
# pending table is empty, the oids table must also be empty. # pending table is empty, the oids table must also be empty.
# #
# info -- {key -> value}
# This table contains storage metadata information. The keys and
# values are simple strings of variable length. Here are the
# valid keys:
#
# version - the version of the database (reserved for ZODB4)
#
# packmark -- [oid] # packmark -- [oid]
# Every object reachable from the root during a classic pack # Every object reachable from the root during a classic pack
# operation will have its oid present in this table. # operation will have its oid present in this table.
...@@ -89,6 +95,8 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage): ...@@ -89,6 +95,8 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage):
# references exist, such that the objects can be completely packed # references exist, such that the objects can be completely packed
# away. # away.
# #
self._packing = False
self._info = self._setupDB('info')
self._serials = self._setupDB('serials', db.DB_DUP) self._serials = self._setupDB('serials', db.DB_DUP)
self._pickles = self._setupDB('pickles') self._pickles = self._setupDB('pickles')
self._refcounts = self._setupDB('refcounts') self._refcounts = self._setupDB('refcounts')
...@@ -169,7 +177,15 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage): ...@@ -169,7 +177,15 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage):
soid, stid = srec soid, stid = srec
if soid <> oid: if soid <> oid:
break break
if stid <> tid: if stid == tid:
# This is the current revision of the object, so
# increment the refcounts of all referents
data = self._pickles.get(oid+stid, txn=txn)
assert data is not None
self._update(deltas, data, 1)
else:
# This is the previous revision of the object, so
# decref its referents and clean up its pickles.
cs.delete() cs.delete()
data = self._pickles.get(oid+stid, txn=txn) data = self._pickles.get(oid+stid, txn=txn)
assert data is not None assert data is not None
...@@ -187,8 +203,16 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage): ...@@ -187,8 +203,16 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage):
if co: co.close() if co: co.close()
if cs: cs.close() if cs: cs.close()
# We're done with this table # We're done with this table
self._oids.truncate(txn)
self._pending.truncate(txn) self._pending.truncate(txn)
# If we're in the middle of a pack, we need to add to the packmark
# table any objects that were modified in this transaction.
# Otherwise, there's a race condition where mark might have happened,
# then the object is added, then sweep runs, deleting the object
# created in the interim.
if self._packing:
for oid in self._oids.keys():
self._packmark.put(oid, PRESENT, txn=txn)
self._oids.truncate(txn)
# Now, to finish up, we need to apply the refcount deltas to the # Now, to finish up, we need to apply the refcount deltas to the
# refcounts table, and do recursive collection of all refcount == 0 # refcounts table, and do recursive collection of all refcount == 0
# objects. # objects.
...@@ -350,6 +374,7 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage): ...@@ -350,6 +374,7 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage):
# A simple wrapper around the bulk of packing, but which acquires a # A simple wrapper around the bulk of packing, but which acquires a
# lock that prevents multiple packs from running at the same time. # lock that prevents multiple packs from running at the same time.
self._packlock.acquire() self._packlock.acquire()
self._packing = True
try: try:
# We don't wrap this in _withtxn() because we're going to do the # We don't wrap this in _withtxn() because we're going to do the
# operation across several Berkeley transactions, which allows # operation across several Berkeley transactions, which allows
...@@ -360,6 +385,7 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage): ...@@ -360,6 +385,7 @@ class BDBMinimalStorage(BerkeleyBase, ConflictResolvingStorage):
# collect object revisions # collect object revisions
self._dopack() self._dopack()
finally: finally:
self._packing = False
self._packlock.release() self._packlock.release()
self.log('classic pack finished') self.log('classic pack finished')
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment