Commit c9726cb5 authored by Julien Muchembled's avatar Julien Muchembled

Merge 4.x branch

parents f371a71c a2da8235
...@@ -56,7 +56,16 @@ Concurrency Control (MVCC) implementation: ...@@ -56,7 +56,16 @@ Concurrency Control (MVCC) implementation:
This change allows server-based storages like ZEO and NEO to be This change allows server-based storages like ZEO and NEO to be
implemented more simply and cleanly. implemented more simply and cleanly.
4.4.2 (unreleased) 4.4.3 (unreleased)
==================
- Internal FileStorage-undo fixes that should allow undo in some cases
where it didn't work before.
- fstail: print the txn offset and header size, instead of only the data offset.
fstail can now be used to truncate a DB at the right offset.
4.4.2 (2016-07-08)
================== ==================
Better support of the new commit protocol. This fixes issues with blobs and Better support of the new commit protocol. This fixes issues with blobs and
......
...@@ -18,7 +18,8 @@ import six ...@@ -18,7 +18,8 @@ import six
import zope.interface import zope.interface
from ZODB.POSException import ConflictError from ZODB.POSException import ConflictError
from ZODB.loglevels import BLATHER from ZODB.loglevels import BLATHER
from ZODB._compat import BytesIO, PersistentUnpickler, PersistentPickler, _protocol from ZODB._compat import (
BytesIO, PersistentUnpickler, PersistentPickler, _protocol)
# Subtle: Python 2.x has pickle.PicklingError and cPickle.PicklingError, # Subtle: Python 2.x has pickle.PicklingError and cPickle.PicklingError,
# and these are unrelated classes! So we shouldn't use pickle.PicklingError, # and these are unrelated classes! So we shouldn't use pickle.PicklingError,
...@@ -71,7 +72,8 @@ def state(self, oid, serial, prfactory, p=''): ...@@ -71,7 +72,8 @@ def state(self, oid, serial, prfactory, p=''):
p = p or self.loadSerial(oid, serial) p = p or self.loadSerial(oid, serial)
p = self._crs_untransform_record_data(p) p = self._crs_untransform_record_data(p)
file = BytesIO(p) file = BytesIO(p)
unpickler = PersistentUnpickler(find_global, prfactory.persistent_load, file) unpickler = PersistentUnpickler(
find_global, prfactory.persistent_load, file)
unpickler.load() # skip the class tuple unpickler.load() # skip the class tuple
return unpickler.load() return unpickler.load()
...@@ -239,7 +241,8 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle, ...@@ -239,7 +241,8 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
prfactory = PersistentReferenceFactory() prfactory = PersistentReferenceFactory()
newpickle = self._crs_untransform_record_data(newpickle) newpickle = self._crs_untransform_record_data(newpickle)
file = BytesIO(newpickle) file = BytesIO(newpickle)
unpickler = PersistentUnpickler(find_global, prfactory.persistent_load, file) unpickler = PersistentUnpickler(
find_global, prfactory.persistent_load, file)
meta = unpickler.load() meta = unpickler.load()
if isinstance(meta, tuple): if isinstance(meta, tuple):
klass = meta[0] klass = meta[0]
......
...@@ -67,9 +67,14 @@ class TxnHeader: ...@@ -67,9 +67,14 @@ class TxnHeader:
self._ext = self._file.read(self.ext_len) self._ext = self._file.read(self.ext_len)
self.ext = loads(self._ext) self.ext = loads(self._ext)
def get_offset(self):
return self._pos
def __len__(self):
return TRANS_HDR_LEN + self.user_len + self.descr_len + self.ext_len
def get_data_offset(self): def get_data_offset(self):
return (self._pos + TRANS_HDR_LEN + self.user_len + self.descr_len return self._pos + len(self)
+ self.ext_len)
def get_timestamp(self): def get_timestamp(self):
return TimeStamp(self.tid) return TimeStamp(self.tid)
......
...@@ -36,8 +36,8 @@ def main(path, ntxn): ...@@ -36,8 +36,8 @@ def main(path, ntxn):
th.read_meta() th.read_meta()
print("%s: hash=%s" % (th.get_timestamp(), print("%s: hash=%s" % (th.get_timestamp(),
binascii.hexlify(hash).decode())) binascii.hexlify(hash).decode()))
print(("user=%r description=%r length=%d offset=%d" print(("user=%r description=%r length=%d offset=%d (+%d)"
% (th.user, th.descr, th.length, th.get_data_offset()))) % (th.user, th.descr, th.length, th.get_offset(), len(th))))
print() print()
th = th.prev_txn() th = th.prev_txn()
i -= 1 i -= 1
......
...@@ -24,10 +24,10 @@ Now lets have a look at the last transactions of this FileStorage: ...@@ -24,10 +24,10 @@ Now lets have a look at the last transactions of this FileStorage:
>>> from ZODB.scripts.fstail import main >>> from ZODB.scripts.fstail import main
>>> main(storagefile, 5) >>> main(storagefile, 5)
2007-11-10 15:18:48.543001: hash=b16422d09fabdb45d4e4325e4b42d7d6f021d3c3 2007-11-10 15:18:48.543001: hash=b16422d09fabdb45d4e4325e4b42d7d6f021d3c3
user='' description='' length=132 offset=185 user='' description='' length=132 offset=162 (+23)
<BLANKLINE> <BLANKLINE>
2007-11-10 15:18:48.543001: hash=b16422d09fabdb45d4e4325e4b42d7d6f021d3c3 2007-11-10 15:18:48.543001: hash=b16422d09fabdb45d4e4325e4b42d7d6f021d3c3
user='' description='initial database creation' length=150 offset=52 user='' description='initial database creation' length=150 offset=4 (+48)
<BLANKLINE> <BLANKLINE>
Now clean up the storage again: Now clean up the storage again:
......
...@@ -29,13 +29,13 @@ checker = zope.testing.renormalizing.RENormalizing([ ...@@ -29,13 +29,13 @@ checker = zope.testing.renormalizing.RENormalizing([
# Python 3 produces larger pickles, even when we use zodbpickle :( # Python 3 produces larger pickles, even when we use zodbpickle :(
# this changes all the offsets and sizes in fstail.txt # this changes all the offsets and sizes in fstail.txt
(re.compile("user='' description='' " (re.compile("user='' description='' "
"length=[0-9]+ offset=[0-9]+"), "length=[0-9]+ offset=[0-9]+ \(\+23\)"),
"user='' description='' " "user='' description='' "
"length=<LENGTH> offset=<OFFSET>"), "length=<LENGTH> offset=<OFFSET> (+23)"),
(re.compile("user='' description='initial database creation' " (re.compile("user='' description='initial database creation' "
"length=[0-9]+ offset=[0-9]+"), "length=[0-9]+ offset=4 \(\+48\)"),
"user='' description='initial database creation' " "user='' description='initial database creation' "
"length=<<LENGTH> offset=<OFFSET>"), "length=<LENGTH> offset=4 (+48)"),
]) ])
def test_suite(): def test_suite():
......
...@@ -619,9 +619,9 @@ class TransactionalUndoStorage: ...@@ -619,9 +619,9 @@ class TransactionalUndoStorage:
tid = p64(i + 1) tid = p64(i + 1)
eq(txn.tid, tid) eq(txn.tid, tid)
L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn] L1 = {(rec.oid, rec.tid, rec.data_txn) for rec in txn}
L2 = [(oid, revid, None) for _tid, oid, revid in orig L2 = {(oid, revid, None) for _tid, oid, revid in orig
if _tid == tid] if _tid == tid}
eq(L1, L2) eq(L1, L2)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment