Commit 197d3aa2 authored by Barry Warsaw

Comment out the debugging crutches.

_zaprevision(): Fixed some silly typos.

pack(): Adopt FileStorage's scheme for converting time floats to
TimeStamps.  Also use set_range() on the txnoids cursor, because the
timestamp (i.e. revid) is unlikely to exist as an exact key and we just
want the record just before the specified time.  Use prev() instead of
prev_dup() on the cursor.
parent 225c934a
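A minimal, self-contained sketch of the time-float-to-TimeStamp conversion the message describes, using only the standard library. timestamp_key() is a hypothetical stand-in for TimeStamp(*(time.gmtime(t)[:5] + (t % 60,))); the struct packing is not the real 8-byte TimeStamp layout, it only illustrates that keys built this way sort chronologically and so can act as revision ids.

import struct
import time

def timestamp_key(t):
    # Stand-in for building a TimeStamp: split the pack time into
    # broken-down UTC fields, keeping fractional seconds via t % 60.
    year, month, day, hour, minute = time.gmtime(t)[:5]
    seconds = t % 60
    # Big-endian packing so plain string comparison matches time order.
    return struct.pack('>HBBBBd', year, month, day, hour, minute, seconds)

# Later times produce keys that compare greater.
assert timestamp_key(time.time()) < timestamp_key(time.time() + 60)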
@@ -4,10 +4,11 @@ See Minimal.py for an implementation of Berkeley storage that does not support
 undo or versioning.
 """

-# $Revision: 1.18 $
+# $Revision: 1.19 $
 __version__ = '0.1'

 import struct
+import time

 # This uses the Dunn/Kuchling PyBSDDB v3 extension module available from
 # http://pybsddb.sourceforge.net
@@ -34,9 +35,9 @@ UNDOABLE_TRANSACTION = 'Y'
 PROTECTED_TRANSACTION = 'N'
 ZERO = '\0'*8
-#DNE = '\377'*8
+DNE = '\377'*8
 # DEBUGGING
-DNE = 'nonexist'                # does not exist
+#DNE = 'nonexist'               # does not exist
@@ -160,7 +161,7 @@ class Full(BerkeleyBase):
         else:
             self.__nextvid = 0L
         # DEBUGGING
-        self._nextserial = 0L
+        #self._nextserial = 0L

     def close(self):
         self._serials.close()
@@ -177,8 +178,8 @@ class Full(BerkeleyBase):
     def _begin(self, tid, u, d, e):
         # DEBUGGING
-        self._nextserial += 1
-        self._serial = utils.p64(self._nextserial)
+        #self._nextserial += 1
+        #self._serial = utils.p64(self._nextserial)
         # Begin the current transaction.  Currently, this just makes sure that
         # the commit log is in the proper state.
         if self._commitlog is None:
@@ -845,7 +846,7 @@ class Full(BerkeleyBase):
         # perform cascading decrefs on the referenced objects.
         #
         # We need the lrevid which points to the pickle for this revision...
-        vid, nvrevid, lrevid = self._metadata.get(key)[16:24]
+        lrevid = self._metadata.get(key)[16:24]
         # ...and now delete the metadata record for this object revision
         self._metadata.delete(key)
         # Decref the reference count of the pickle pointed to by oid+lrevid.
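The one-line fix above hinges on the layout of a metadata record. A short sketch, assuming (as the slice offsets suggest, not confirmed by this diff) that a record concatenates four 8-byte fields, vid + nvrevid + lrevid + a previous-revision id: bytes 16:24 then hold only lrevid, so the old three-name unpack of that single 8-byte slice was one of the "silly typos" being fixed.

import struct

def make_metadata(vid, nvrevid, lrevid, prevrevid):
    # Hypothetical packer: four 64-bit big-endian ids back to back (32 bytes).
    return struct.pack('>QQQQ', vid, nvrevid, lrevid, prevrevid)

def extract_lrevid(record):
    return record[16:24]        # the third 8-byte field only

record = make_metadata(1, 0, 42, 7)
assert struct.unpack('>Q', extract_lrevid(record))[0] == 42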
@@ -870,7 +871,7 @@ class Full(BerkeleyBase):
         # Sniff the pickle to get the objects it refers to
         collectables = []
         refoids = []
-        referencesf(pickle, oids)
+        referencesf(pickle, refoids)
         # Now decref the reference counts for each of those objects.  If it
         # goes to zero, remember the oid so we can recursively zap its
         # metadata too.
@@ -923,18 +924,23 @@ class Full(BerkeleyBase):
         c.close()

     def pack(self, t, referencesf):
+        # t is a TimeTime, or time float, convert this to a TimeStamp object,
+        # using an algorithm similar to what's used in FileStorage.  The
+        # TimeStamp can then be used as a key in the txnMetadata table, since
+        # we know our revision id's are actually TimeStamps.
+        t0 = TimeStamp(*(time.gmtime(t)[:5] + (t%60,)))
         self._lock_acquire()
         c = None
         tidmarks = {}
         try:
             # Figure out when to pack to.  We happen to know that our
             # transaction ids are really timestamps.
-            t0 = TimeStamp(t)
             c = self._txnoids.cursor()
-            rec = c.set(`t0`)
+            # Need to use the repr of the TimeStamp so we get a string
+            rec = c.set_range(`t0`)
             while rec:
                 tid, oid = rec
-                rec = c.prev_dup()
+                rec = c.prev()
                 # We need to mark this transaction as having participated in a
                 # pack, so that undo will not create a temporal anomaly.
                 if not tidmarks.has_key(tid):
...
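For the cursor change in pack(), here is a self-contained, stdlib-only sketch of the lookup behavior being relied on. Roughly, on a BerkeleyDB BTree cursor set(key) only succeeds on an exact match, set_range(key) positions at the smallest key >= key, prev() steps backward across all records, and prev_dup() stays within duplicates of the current key. The sorted list below is a stand-in for the txnoids table (transaction-id timestamp -> oid, with duplicate keys), and records_before() is a hypothetical helper, not code from the storage.

import bisect

# Stand-in for txnoids: duplicate tids map one transaction to many oids.
txnoids = [
    ('tid1', 'oid-a'), ('tid1', 'oid-b'),
    ('tid3', 'oid-c'),
    ('tid5', 'oid-d'), ('tid5', 'oid-e'),
]
keys = [tid for tid, oid in txnoids]

def records_before(packtid):
    # set_range() analogue: index of the first record with tid >= packtid;
    # the exact packtid rarely exists, since it comes from a wall-clock time.
    i = bisect.bisect_left(keys, packtid)
    # prev() analogue: walk backward over *every* earlier record, not just
    # the duplicates of the record we landed on (which is all prev_dup()
    # would visit).
    while i > 0:
        i -= 1
        yield txnoids[i]

print(list(records_before('tid4')))
# -> [('tid3', 'oid-c'), ('tid1', 'oid-b'), ('tid1', 'oid-a')]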