Commit 326e9f38 authored by Stephan Richter's avatar Stephan Richter

Okay, the test failures are now down to 19/1379. The left-over errors are
largely due to:

* Different pickle sizes in Python 3
  (even for protocol 1, where — as far as I can tell — new padding is added.)

* Weak references do not seem to work correctly.

* When all tests are run, committing transactions fails. Probably some
  tests are not cleaning up enough after themselves.

The biggest issue remaining at this point is the fork of Python 3.3's
pickle to properly load Python 2 binary strings and adding back
``noload()`` to the unpickler.
parent f4bfe178
...@@ -39,6 +39,13 @@ except ImportError: ...@@ -39,6 +39,13 @@ except ImportError:
# Py3 # Py3
import pickle import pickle
# Py3: Python 3's `hasattr()` only swallows AttributeError.
def py2_hasattr(obj, name):
    """Return True if ``obj`` has an attribute ``name``, Python 2 style.

    Python 3's ``hasattr()`` only swallows ``AttributeError``, so an
    attribute implemented as a property that raises some other error
    (e.g. ``KeyError``) propagates on Py3 where Py2 reported ``False``.
    This helper restores the permissive Py2 behavior.

    The handler is restricted to ``Exception`` (rather than a bare
    ``except:``) so that control-flow exceptions such as
    ``KeyboardInterrupt`` and ``SystemExit`` still propagate.
    """
    try:
        getattr(obj, name)
    except Exception:
        return False
    return True
log = logging.getLogger("ZODB.BaseStorage") log = logging.getLogger("ZODB.BaseStorage")
...@@ -373,7 +380,7 @@ def copy(source, dest, verbose=0): ...@@ -373,7 +380,7 @@ def copy(source, dest, verbose=0):
# using store(). However, if we use store, then # using store(). However, if we use store, then
# copyTransactionsFrom() may fail with VersionLockError or # copyTransactionsFrom() may fail with VersionLockError or
# ConflictError. # ConflictError.
restoring = hasattr(dest, 'restore') restoring = py2_hasattr(dest, 'restore')
fiter = source.iterator() fiter = source.iterator()
for transaction in fiter: for transaction in fiter:
tid = transaction.tid tid = transaction.tid
......
...@@ -996,7 +996,7 @@ class FileStorage( ...@@ -996,7 +996,7 @@ class FileStorage(
raise UndoError("non-undoable transaction") raise UndoError("non-undoable transaction")
if failures: if failures:
raise MultipleUndoErrors(failures.items()) raise MultipleUndoErrors(list(failures.items()))
return tindex return tindex
......
...@@ -563,7 +563,7 @@ class FileStoragePacker(FileStorageFormatter): ...@@ -563,7 +563,7 @@ class FileStoragePacker(FileStorageFormatter):
# Update the header to reflect current information, then write # Update the header to reflect current information, then write
# it to the output file. # it to the output file.
if data is None: if data is None:
data = "" data = b''
h.prev = 0 h.prev = 0
h.back = 0 h.back = 0
h.plen = len(data) h.plen = len(data)
......
...@@ -187,7 +187,7 @@ class Blob(persistent.Persistent): ...@@ -187,7 +187,7 @@ class Blob(persistent.Persistent):
self._create_uncommitted_file() self._create_uncommitted_file()
result = BlobFile(self._p_blob_uncommitted, mode, self) result = BlobFile(self._p_blob_uncommitted, mode, self)
if self._p_blob_committed: if self._p_blob_committed:
utils.cp(open(self._p_blob_committed), result) utils.cp(open(self._p_blob_committed, 'rb'), result)
if mode == 'r+': if mode == 'r+':
result.seek(0) result.seek(0)
else: else:
...@@ -863,7 +863,7 @@ class BlobStorage(BlobStorageMixin): ...@@ -863,7 +863,7 @@ class BlobStorage(BlobStorageMixin):
data, serial_before, serial_after = load_result data, serial_before, serial_after = load_result
orig_fn = self.fshelper.getBlobFilename(oid, serial_before) orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
new_fn = self.fshelper.getBlobFilename(oid, undo_serial) new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
orig = open(orig_fn, "r") orig = open(orig_fn, "rb")
new = open(new_fn, "wb") new = open(new_fn, "wb")
utils.cp(orig, new) utils.cp(orig, new)
orig.close() orig.close()
......
...@@ -5,7 +5,12 @@ from __future__ import print_function ...@@ -5,7 +5,12 @@ from __future__ import print_function
import pickle import pickle
import sys import sys
from ZODB.FileStorage import FileStorage from ZODB.FileStorage import FileStorage
from cStringIO import StringIO
try:
from cStringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import BytesIO
class FakeError(Exception): class FakeError(Exception):
def __init__(self, module, name): def __init__(self, module, name):
...@@ -96,7 +101,7 @@ def analyze_trans(report, txn): ...@@ -96,7 +101,7 @@ def analyze_trans(report, txn):
def get_type(record): def get_type(record):
try: try:
unpickled = FakeUnpickler(StringIO(record.data)).load() unpickled = FakeUnpickler(BytesIO(record.data)).load()
except FakeError as err: except FakeError as err:
return "%s.%s" % (err.module, err.name) return "%s.%s" % (err.module, err.name)
except: except:
......
...@@ -890,8 +890,8 @@ class MonteCarloTests(unittest.TestCase): ...@@ -890,8 +890,8 @@ class MonteCarloTests(unittest.TestCase):
self._callRepozoMain(argv) self._callRepozoMain(argv)
# check restored file content is equal to file that was backed up # check restored file content is equal to file that was backed up
f = file(correctpath, 'rb') f = open(correctpath, 'rb')
g = file(restoredfile, 'rb') g = open(restoredfile, 'rb')
fguts = f.read() fguts = f.read()
gguts = g.read() gguts = g.read()
f.close() f.close()
......
...@@ -264,7 +264,7 @@ class ObjectWriter: ...@@ -264,7 +264,7 @@ class ObjectWriter:
... ...
InvalidObjectReference: InvalidObjectReference:
('Attempt to store an object from a foreign database connection', ('Attempt to store an object from a foreign database connection',
<ZODB.serialize.DummyJar instance at ...>, P(bob)) <ZODB.serialize.DummyJar ...>, P(bob))
Constructor arguments used by __new__(), as returned by Constructor arguments used by __new__(), as returned by
__getnewargs__(), can affect memory allocation, but may also __getnewargs__(), can affect memory allocation, but may also
......
...@@ -45,10 +45,10 @@ class FileStorageCorruptTests(StorageTestBase): ...@@ -45,10 +45,10 @@ class FileStorageCorruptTests(StorageTestBase):
# truncation the index file # truncation the index file
self.failUnless(os.path.exists('Data.fs.index')) self.failUnless(os.path.exists('Data.fs.index'))
f = open('Data.fs.index', 'r+') f = open('Data.fs.index', 'rb+')
f.seek(0, 2) f.seek(0, 2)
size = f.tell() size = f.tell()
f.seek(size / 2) f.seek(size // 2)
f.truncate() f.truncate()
f.close() f.close()
...@@ -62,10 +62,10 @@ class FileStorageCorruptTests(StorageTestBase): ...@@ -62,10 +62,10 @@ class FileStorageCorruptTests(StorageTestBase):
# truncation the index file # truncation the index file
self.failUnless(os.path.exists('Data.fs.index')) self.failUnless(os.path.exists('Data.fs.index'))
size = os.stat('Data.fs.index')[stat.ST_SIZE] size = os.stat('Data.fs.index')[stat.ST_SIZE]
f = open('Data.fs.index', 'r+') f = open('Data.fs.index', 'rb+')
while f.tell() < size: while f.tell() < size:
f.seek(random.randrange(1, size / 10), 1) f.seek(random.randrange(1, size // 10), 1)
f.write('\000') f.write(b'\000')
f.close() f.close()
self._storage = ZODB.FileStorage.FileStorage('Data.fs') self._storage = ZODB.FileStorage.FileStorage('Data.fs')
......
...@@ -5,14 +5,14 @@ A storage that provides IExternalGC supports external garbage ...@@ -5,14 +5,14 @@ A storage that provides IExternalGC supports external garbage
collectors by providing a deleteObject method that transactionally collectors by providing a deleteObject method that transactionally
deletes an object. deletes an object.
A create_storage function is provided that creates a storage. A create_storage function is provided that creates a storage.
>>> storage = create_storage() >>> storage = create_storage()
>>> import ZODB.blob, transaction >>> import ZODB.blob, transaction
>>> db = ZODB.DB(storage) >>> db = ZODB.DB(storage)
>>> conn = db.open() >>> conn = db.open()
>>> conn.root()[0] = conn.root().__class__() >>> conn.root()[0] = conn.root().__class__()
>>> conn.root()[1] = ZODB.blob.Blob('some data') >>> conn.root()[1] = ZODB.blob.Blob(b'some data')
>>> transaction.commit() >>> transaction.commit()
>>> oid0 = conn.root()[0]._p_oid >>> oid0 = conn.root()[0]._p_oid
>>> oid1 = conn.root()[1]._p_oid >>> oid1 = conn.root()[1]._p_oid
......
...@@ -240,9 +240,9 @@ class IteratorDeepCompare: ...@@ -240,9 +240,9 @@ class IteratorDeepCompare:
# meaning they were the same length. # meaning they were the same length.
# Additionally, check that we're backwards compatible to the # Additionally, check that we're backwards compatible to the
# IndexError we used to raise before. # IndexError we used to raise before.
self.assertRaises(StopIteration, itxn1.next) self.assertRaises(StopIteration, next, itxn1)
self.assertRaises(StopIteration, itxn2.next) self.assertRaises(StopIteration, next, itxn2)
# Make sure there are no more records left in txn1 and txn2, meaning # Make sure there are no more records left in txn1 and txn2, meaning
# they were the same length # they were the same length
self.assertRaises(StopIteration, iter1.next) self.assertRaises(StopIteration, next, iter1)
self.assertRaises(StopIteration, iter2.next) self.assertRaises(StopIteration, next, iter2)
...@@ -783,4 +783,5 @@ def IExternalGC_suite(factory): ...@@ -783,4 +783,5 @@ def IExternalGC_suite(factory):
return doctest.DocFileSuite( return doctest.DocFileSuite(
'IExternalGC.test', 'IExternalGC.test',
setUp=setup, tearDown=zope.testing.setupstack.tearDown) setUp=setup, tearDown=zope.testing.setupstack.tearDown,
checker=ZODB.tests.util.checker)
...@@ -50,7 +50,7 @@ class ReadOnlyStorage: ...@@ -50,7 +50,7 @@ class ReadOnlyStorage:
self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t) self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)
self.assertRaises(ReadOnlyError, self._storage.store, self.assertRaises(ReadOnlyError, self._storage.store,
'\000' * 8, None, '', '', t) b'\000' * 8, None, b'', '', t)
self.assertRaises(ReadOnlyError, self._storage.undo, self.assertRaises(ReadOnlyError, self._storage.undo,
'\000' * 8, t) b'\000' * 8, t)
...@@ -226,7 +226,7 @@ class StorageTestBase(ZODB.tests.util.TestCase): ...@@ -226,7 +226,7 @@ class StorageTestBase(ZODB.tests.util.TestCase):
vote_result = self._storage.tpc_vote(t) vote_result = self._storage.tpc_vote(t)
self._storage.tpc_finish(t) self._storage.tpc_finish(t)
if expected_oids is not None: if expected_oids is not None:
oids = undo_result and undo_result[1] or [] oids = list(undo_result[1]) if undo_result else []
oids.extend(oid for (oid, _) in vote_result or ()) oids.extend(oid for (oid, _) in vote_result or ())
self.assertEqual(len(oids), len(expected_oids), repr(oids)) self.assertEqual(len(oids), len(expected_oids), repr(oids))
for oid in expected_oids: for oid in expected_oids:
......
...@@ -44,12 +44,11 @@ def snooze(): ...@@ -44,12 +44,11 @@ def snooze():
time.sleep(0.1) time.sleep(0.1)
def listeq(L1, L2): def listeq(L1, L2):
"""Return True if L1.sort() == L2.sort()""" """Return True if L1.sort() == L2.sort()
c1 = L1[:]
c2 = L2[:] Also support iterators.
c1.sort() """
c2.sort() return sorted(L1) == sorted(L2)
return c1 == c2
class TransactionalUndoStorage: class TransactionalUndoStorage:
...@@ -59,7 +58,7 @@ class TransactionalUndoStorage: ...@@ -59,7 +58,7 @@ class TransactionalUndoStorage:
def _transaction_store(self, oid, rev, data, vers, trans): def _transaction_store(self, oid, rev, data, vers, trans):
r = self._storage.store(oid, rev, data, vers, trans) r = self._storage.store(oid, rev, data, vers, trans)
if r: if r:
if type(r) == str: if isinstance(r, bytes):
self.__serials[oid] = r self.__serials[oid] = r
else: else:
for oid, serial in r: for oid, serial in r:
...@@ -432,7 +431,7 @@ class TransactionalUndoStorage: ...@@ -432,7 +431,7 @@ class TransactionalUndoStorage:
# record by packing. # record by packing.
# Add a few object revisions # Add a few object revisions
oid = '\0'*8 oid = b'\0'*8
revid0 = self._dostore(oid, data=MinPO(50)) revid0 = self._dostore(oid, data=MinPO(50))
revid1 = self._dostore(oid, revid=revid0, data=MinPO(51)) revid1 = self._dostore(oid, revid=revid0, data=MinPO(51))
snooze() snooze()
...@@ -492,14 +491,14 @@ class TransactionalUndoStorage: ...@@ -492,14 +491,14 @@ class TransactionalUndoStorage:
log = self._storage.undoLog() log = self._storage.undoLog()
eq(len(log), 4) eq(len(log), 4)
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3', for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3',
'o1 -> o2', 'initial database creation')): b'o1 -> o2', b'initial database creation')):
eq(entry[0]['description'], entry[1]) eq(entry[0]['description'], entry[1])
self._storage.pack(packtime, referencesf) self._storage.pack(packtime, referencesf)
log = self._storage.undoLog() log = self._storage.undoLog()
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3')): for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1]) eq(entry[0]['description'], entry[1])
tid = log[0]['id'] tid = log[0]['id']
...@@ -511,7 +510,7 @@ class TransactionalUndoStorage: ...@@ -511,7 +510,7 @@ class TransactionalUndoStorage:
conn.sync() conn.sync()
log = self._storage.undoLog() log = self._storage.undoLog()
for entry in zip(log, ('undo', 'o1 -> o3', 'o1 -> o2 -> o3')): for entry in zip(log, (b'undo', b'o1 -> o3', b'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1]) eq(entry[0]['description'], entry[1])
eq(o1.obj, o2) eq(o1.obj, o2)
...@@ -703,13 +702,13 @@ class TransactionalUndoStorage: ...@@ -703,13 +702,13 @@ class TransactionalUndoStorage:
L2.sort() L2.sort()
eq(L1, L2) eq(L1, L2)
self.assertRaises(StopIteration, transactions.next) self.assertRaises(StopIteration, next, transactions)
def checkUndoLogMetadata(self): def checkUndoLogMetadata(self):
# test that the metadata is correct in the undo log # test that the metadata is correct in the undo log
t = transaction.get() t = transaction.get()
t.note('t1') t.note('t1')
t.setExtendedInfo('k2','this is transaction metadata') t.setExtendedInfo('k2', 'this is transaction metadata')
t.setUser('u3',path='p3') t.setUser('u3',path='p3')
db = DB(self._storage) db = DB(self._storage)
conn = db.open() conn = db.open()
...@@ -721,9 +720,9 @@ class TransactionalUndoStorage: ...@@ -721,9 +720,9 @@ class TransactionalUndoStorage:
l = self._storage.undoLog() l = self._storage.undoLog()
self.assertEqual(len(l),2) self.assertEqual(len(l),2)
d = l[0] d = l[0]
self.assertEqual(d['description'],'t1') self.assertEqual(d['description'], b't1')
self.assertEqual(d['k2'],'this is transaction metadata') self.assertEqual(d['k2'], 'this is transaction metadata')
self.assertEqual(d['user_name'],'p3 u3') self.assertEqual(d['user_name'], b'p3 u3')
# A common test body for index tests on undoInfo and undoLog. Before # A common test body for index tests on undoInfo and undoLog. Before
# ZODB 3.4, they always returned a wrong number of results (one too # ZODB 3.4, they always returned a wrong number of results (one too
......
...@@ -9,7 +9,7 @@ with some data: ...@@ -9,7 +9,7 @@ with some data:
>>> import transaction >>> import transaction
>>> blob = Blob() >>> blob = Blob()
>>> data = blob.open("w") >>> data = blob.open("w")
>>> data.write("I'm a happy Blob.") >>> _ = data.write(b"I'm a happy Blob.")
>>> data.close() >>> data.close()
We also need a database with a blob supporting storage. (We're going to use We also need a database with a blob supporting storage. (We're going to use
...@@ -52,7 +52,7 @@ MVCC also works. ...@@ -52,7 +52,7 @@ MVCC also works.
>>> transaction3 = transaction.TransactionManager() >>> transaction3 = transaction.TransactionManager()
>>> connection3 = database.open(transaction_manager=transaction3) >>> connection3 = database.open(transaction_manager=transaction3)
>>> f = connection.root()['myblob'].open('w') >>> f = connection.root()['myblob'].open('w')
>>> f.write('I am an ecstatic Blob.') >>> _ = f.write(b'I am an ecstatic Blob.')
>>> f.close() >>> f.close()
>>> transaction.commit() >>> transaction.commit()
>>> connection3.root()['myblob'].open('r').read() >>> connection3.root()['myblob'].open('r').read()
......
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Import/export support for blob data Import/export support for blob data
=================================== ===================================
...@@ -34,12 +20,14 @@ Put a couple blob objects in our database1 and on the filesystem: ...@@ -34,12 +20,14 @@ Put a couple blob objects in our database1 and on the filesystem:
>>> import time, os >>> import time, os
>>> nothing = transaction.begin() >>> nothing = transaction.begin()
>>> data1 = 'x'*100000 >>> data1 = b'x'*100000
>>> blob1 = ZODB.blob.Blob() >>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write(data1) >>> with blob1.open('w') as file:
>>> data2 = 'y'*100000 ... _ = file.write(data1)
>>> data2 = b'y'*100000
>>> blob2 = ZODB.blob.Blob() >>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write(data2) >>> with blob2.open('w') as file:
... _ = file.write(data2)
>>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2}) >>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
>>> root1['blobdata'] = d >>> root1['blobdata'] = d
>>> transaction.commit() >>> transaction.commit()
......
...@@ -34,32 +34,37 @@ Put some revisions of a blob object in our database and on the filesystem: ...@@ -34,32 +34,37 @@ Put some revisions of a blob object in our database and on the filesystem:
>>> nothing = transaction.begin() >>> nothing = transaction.begin()
>>> times.append(new_time()) >>> times.append(new_time())
>>> blob = Blob() >>> blob = Blob()
>>> blob.open('w').write('this is blob data 0') >>> with blob.open('w') as file:
... _ = file.write(b'this is blob data 0')
>>> root['blob'] = blob >>> root['blob'] = blob
>>> transaction.commit() >>> transaction.commit()
>>> tids.append(blob._p_serial) >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin() >>> nothing = transaction.begin()
>>> times.append(new_time()) >>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 1') >>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 1')
>>> transaction.commit() >>> transaction.commit()
>>> tids.append(blob._p_serial) >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin() >>> nothing = transaction.begin()
>>> times.append(new_time()) >>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 2') >>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 2')
>>> transaction.commit() >>> transaction.commit()
>>> tids.append(blob._p_serial) >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin() >>> nothing = transaction.begin()
>>> times.append(new_time()) >>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 3') >>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 3')
>>> transaction.commit() >>> transaction.commit()
>>> tids.append(blob._p_serial) >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin() >>> nothing = transaction.begin()
>>> times.append(new_time()) >>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 4') >>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 4')
>>> transaction.commit() >>> transaction.commit()
>>> tids.append(blob._p_serial) >>> tids.append(blob._p_serial)
...@@ -74,7 +79,7 @@ Do a pack to the slightly before the first revision was written: ...@@ -74,7 +79,7 @@ Do a pack to the slightly before the first revision was written:
>>> blob_storage.pack(packtime, referencesf) >>> blob_storage.pack(packtime, referencesf)
>>> [ os.path.exists(x) for x in fns ] >>> [ os.path.exists(x) for x in fns ]
[True, True, True, True, True] [True, True, True, True, True]
Do a pack to the slightly before the second revision was written: Do a pack to the slightly before the second revision was written:
>>> packtime = times[1] >>> packtime = times[1]
......
##############################################################################
#
# Copyright (c) 2005-2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Transaction support for Blobs Transaction support for Blobs
============================= =============================
...@@ -27,7 +13,8 @@ We need a database with a blob supporting storage:: ...@@ -27,7 +13,8 @@ We need a database with a blob supporting storage::
Putting a Blob into a Connection works like any other Persistent object:: Putting a Blob into a Connection works like any other Persistent object::
>>> blob1 = ZODB.blob.Blob() >>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write('this is blob 1') >>> with blob1.open('w') as file:
... _ = file.write(b'this is blob 1')
>>> root1['blob1'] = blob1 >>> root1['blob1'] = blob1
>>> 'blob1' in root1 >>> 'blob1' in root1
True True
...@@ -62,7 +49,8 @@ state: ...@@ -62,7 +49,8 @@ state:
False False
>>> blob1._p_blob_uncommitted >>> blob1._p_blob_uncommitted
>>> blob1.open('w').write('this is new blob 1') >>> with blob1.open('w') as file:
... _ = file.write(b'this is new blob 1')
>>> blob1.open().read() >>> blob1.open().read()
'this is new blob 1' 'this is new blob 1'
>>> fname = blob1._p_blob_uncommitted >>> fname = blob1._p_blob_uncommitted
...@@ -120,14 +108,15 @@ when we start):: ...@@ -120,14 +108,15 @@ when we start)::
>>> blob1afh3 = blob1a.open('a') >>> blob1afh3 = blob1a.open('a')
>>> bool(blob1a._p_changed) >>> bool(blob1a._p_changed)
True True
>>> blob1afh3.write('woot!') >>> _ = blob1afh3.write(b'woot!')
>>> blob1afh3.close() >>> blob1afh3.close()
We can open more than one blob object during the course of a single We can open more than one blob object during the course of a single
transaction:: transaction::
>>> blob2 = ZODB.blob.Blob() >>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write('this is blob 3') >>> with blob2.open('w') as file:
... _ = file.write(b'this is blob 3')
>>> root2['blob2'] = blob2 >>> root2['blob2'] = blob2
>>> transaction.commit() >>> transaction.commit()
...@@ -150,8 +139,8 @@ moment):: ...@@ -150,8 +139,8 @@ moment)::
>>> root1['wontwork'] = blob1.open('r') >>> root1['wontwork'] = blob1.open('r')
>>> transaction.commit() >>> transaction.commit()
Traceback (most recent call last): Traceback (most recent call last):
... ...
TypeError: coercing to Unicode: need string or buffer, BlobFile found TypeError: ...
Abort for good measure:: Abort for good measure::
...@@ -166,8 +155,8 @@ connections should result in a write conflict error:: ...@@ -166,8 +155,8 @@ connections should result in a write conflict error::
>>> root4 = database.open(transaction_manager=tm2).root() >>> root4 = database.open(transaction_manager=tm2).root()
>>> blob1c3 = root3['blob1'] >>> blob1c3 = root3['blob1']
>>> blob1c4 = root4['blob1'] >>> blob1c4 = root4['blob1']
>>> blob1c3fh1 = blob1c3.open('a').write('this is from connection 3') >>> blob1c3fh1 = blob1c3.open('a').write(b'this is from connection 3')
>>> blob1c4fh1 = blob1c4.open('a').write('this is from connection 4') >>> blob1c4fh1 = blob1c4.open('a').write(b'this is from connection 4')
>>> tm1.commit() >>> tm1.commit()
>>> root3['blob1'].open('r').read() >>> root3['blob1'].open('r').read()
'this is blob 1woot!this is from connection 3' 'this is blob 1woot!this is from connection 3'
...@@ -215,14 +204,14 @@ We do support optimistic savepoints: ...@@ -215,14 +204,14 @@ We do support optimistic savepoints:
>>> root5 = connection5.root() >>> root5 = connection5.root()
>>> blob = ZODB.blob.Blob() >>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w") >>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.") >>> _ = blob_fh.write(b"I'm a happy blob.")
>>> blob_fh.close() >>> blob_fh.close()
>>> root5['blob'] = blob >>> root5['blob'] = blob
>>> transaction.commit() >>> transaction.commit()
>>> root5['blob'].open("r").read() >>> root5['blob'].open("r").read()
"I'm a happy blob." "I'm a happy blob."
>>> blob_fh = root5['blob'].open("a") >>> blob_fh = root5['blob'].open("a")
>>> blob_fh.write(" And I'm singing.") >>> _ = blob_fh.write(b" And I'm singing.")
>>> blob_fh.close() >>> blob_fh.close()
>>> root5['blob'].open("r").read() >>> root5['blob'].open("r").read()
"I'm a happy blob. And I'm singing." "I'm a happy blob. And I'm singing."
...@@ -248,7 +237,8 @@ the committed location again: ...@@ -248,7 +237,8 @@ the committed location again:
We support non-optimistic savepoints too: We support non-optimistic savepoints too:
>>> root5['blob'].open("a").write(" And I'm dancing.") >>> with root5['blob'].open("a") as file:
... _ = file.write(b" And I'm dancing.")
>>> root5['blob'].open("r").read() >>> root5['blob'].open("r").read()
"I'm a happy blob. And I'm singing. And I'm dancing." "I'm a happy blob. And I'm singing. And I'm dancing."
>>> savepoint = transaction.savepoint() >>> savepoint = transaction.savepoint()
...@@ -259,7 +249,8 @@ Again, the savepoint creates a new savepoints directory: ...@@ -259,7 +249,8 @@ Again, the savepoint creates a new savepoints directory:
... if name.startswith('savepoint')]) ... if name.startswith('savepoint')])
1 1
>>> root5['blob'].open("w").write(" And the weather is beautiful.") >>> with root5['blob'].open("w") as file:
... _ = file.write(b" And the weather is beautiful.")
>>> savepoint.rollback() >>> savepoint.rollback()
>>> root5['blob'].open("r").read() >>> root5['blob'].open("r").read()
...@@ -283,7 +274,7 @@ file that can be opened. ...@@ -283,7 +274,7 @@ file that can be opened.
>>> root6 = connection6.root() >>> root6 = connection6.root()
>>> blob = ZODB.blob.Blob() >>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w") >>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.") >>> _ = blob_fh.write(b"I'm a happy blob.")
>>> blob_fh.close() >>> blob_fh.close()
>>> root6['blob'] = blob >>> root6['blob'] = blob
>>> transaction.commit() >>> transaction.commit()
...@@ -296,12 +287,13 @@ We can also read committed data by calling open with a 'c' flag: ...@@ -296,12 +287,13 @@ We can also read committed data by calling open with a 'c' flag:
This just returns a regular file object: This just returns a regular file object:
>>> type(f) >>> type(f) == file_type
<type 'file'> True
and doesn't prevent us from opening the blob for writing: and doesn't prevent us from opening the blob for writing:
>>> blob.open('w').write('x') >>> with blob.open('w') as file:
... _ = file.write(b'x')
>>> blob.open().read() >>> blob.open().read()
'x' 'x'
...@@ -325,7 +317,8 @@ uncommitted changes: ...@@ -325,7 +317,8 @@ uncommitted changes:
... ...
BlobError: Uncommitted changes BlobError: Uncommitted changes
>>> blob.open('w').write("I'm a happy blob.") >>> with blob.open('w') as file:
... _ = file.write(b"I'm a happy blob.")
>>> root6['blob6'] = blob >>> root6['blob6'] = blob
>>> blob.committed() >>> blob.committed()
Traceback (most recent call last): Traceback (most recent call last):
...@@ -354,10 +347,12 @@ uncommitted changes: ...@@ -354,10 +347,12 @@ uncommitted changes:
You can't open a committed blob file for writing: You can't open a committed blob file for writing:
>>> open(blob.committed(), 'w') # doctest: +ELLIPSIS >>> try:
Traceback (most recent call last): ... open(blob.committed(), 'w') # doctest: +ELLIPSIS
... ... except:
IOError: ... ... # Produces IOError in Py2 and PermissionError in Py3
... print('Error raised.')
Error raised.
tpc_abort tpc_abort
--------- ---------
...@@ -368,11 +363,13 @@ stored are discarded. ...@@ -368,11 +363,13 @@ stored are discarded.
>>> olddata, oldserial = blob_storage.load(blob._p_oid, '') >>> olddata, oldserial = blob_storage.load(blob._p_oid, '')
>>> t = transaction.get() >>> t = transaction.get()
>>> blob_storage.tpc_begin(t) >>> blob_storage.tpc_begin(t)
>>> open('blobfile', 'w').write('This data should go away') >>> with open('blobfile', 'wb') as file:
... _ = file.write(b'This data should go away')
>>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile', >>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile',
... '', t) ... '', t)
>>> new_oid = blob_storage.new_oid() >>> new_oid = blob_storage.new_oid()
>>> open('blobfile2', 'w').write('This data should go away too') >>> with open('blobfile2', 'wb') as file:
... _ = file.write(b'This data should go away too')
>>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2', >>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2',
... '', t) ... '', t)
......
...@@ -187,10 +187,10 @@ class FileStorageTests( ...@@ -187,10 +187,10 @@ class FileStorageTests(
# of the largest oid in use. # of the largest oid in use.
t = transaction.Transaction() t = transaction.Transaction()
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
giant_oid = '\xee' * 8 giant_oid = b'\xee' * 8
# Store an object. # Store an object.
# oid, serial, data, version, transaction # oid, serial, data, version, transaction
r1 = self._storage.store(giant_oid, '\0'*8, 'data', '', t) r1 = self._storage.store(giant_oid, b'\0'*8, b'data', b'', t)
# Finish the transaction. # Finish the transaction.
r2 = self._storage.tpc_vote(t) r2 = self._storage.tpc_vote(t)
self._storage.tpc_finish(t) self._storage.tpc_finish(t)
...@@ -204,10 +204,10 @@ class FileStorageTests( ...@@ -204,10 +204,10 @@ class FileStorageTests(
# ZRS recovery, use the .restore() method, this is plain critical. # ZRS recovery, use the .restore() method, this is plain critical.
t = transaction.Transaction() t = transaction.Transaction()
self._storage.tpc_begin(t) self._storage.tpc_begin(t)
giant_oid = '\xee' * 8 giant_oid = b'\xee' * 8
# Store an object. # Store an object.
# oid, serial, data, version, prev_txn, transaction # oid, serial, data, version, prev_txn, transaction
r1 = self._storage.restore(giant_oid, '\0'*8, 'data', '', None, t) r1 = self._storage.restore(giant_oid, b'\0'*8, b'data', b'', None, t)
# Finish the transaction. # Finish the transaction.
r2 = self._storage.tpc_vote(t) r2 = self._storage.tpc_vote(t)
self._storage.tpc_finish(t) self._storage.tpc_finish(t)
...@@ -286,7 +286,7 @@ class FileStorageTests( ...@@ -286,7 +286,7 @@ class FileStorageTests(
expected_data, expected_tid = self._storage.load(oid, '') expected_data, expected_tid = self._storage.load(oid, '')
self.assertEqual(expected_data, data) self.assertEqual(expected_data, data)
self.assertEqual(expected_tid, tid) self.assertEqual(expected_tid, tid)
if x == '\002': if x == b'\002':
self.assertEqual(next_oid, None) self.assertEqual(next_oid, None)
else: else:
self.assertNotEqual(next_oid, None) self.assertNotEqual(next_oid, None)
...@@ -374,13 +374,13 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase): ...@@ -374,13 +374,13 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True) self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True)
def checkanalyze(self): def checkanalyze(self):
import new, sys, pickle import types, sys, pickle
from BTrees.OOBTree import OOBTree from BTrees.OOBTree import OOBTree
from ZODB.scripts import analyze from ZODB.scripts import analyze
# Set up a module to act as a broken import # Set up a module to act as a broken import
module_name = 'brokenmodule' module_name = 'brokenmodule'
module = new.module(module_name) module = types.ModuleType(module_name)
sys.modules[module_name] = module sys.modules[module_name] = module
class Broken(MinPO): class Broken(MinPO):
...@@ -426,8 +426,7 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase): ...@@ -426,8 +426,7 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
analyze.analyze_trans(rep, txn) analyze.analyze_trans(rep, txn)
# from ZODB.scripts.analyze.report # from ZODB.scripts.analyze.report
typemap = rep.TYPEMAP.keys() typemap = sorted(rep.TYPEMAP.keys())
typemap.sort()
cumpct = 0.0 cumpct = 0.0
for t in typemap: for t in typemap:
pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
...@@ -605,19 +604,19 @@ def deal_with_finish_failures(): ...@@ -605,19 +604,19 @@ def deal_with_finish_failures():
>>> import zope.testing.loggingsupport >>> import zope.testing.loggingsupport
>>> handler = zope.testing.loggingsupport.InstalledHandler( >>> handler = zope.testing.loggingsupport.InstalledHandler(
... 'ZODB.FileStorage') ... 'ZODB.FileStorage')
>>> transaction.commit() >>> transaction.commit() # doctest: +ELLIPSIS
Traceback (most recent call last): Traceback (most recent call last):
... ...
TypeError: <lambda>() takes no arguments (1 given) TypeError: <lambda>() takes ...
>>> print handler >>> print(handler)
ZODB.FileStorage CRITICAL ZODB.FileStorage CRITICAL
Failure in _finish. Closing. Failure in _finish. Closing.
>>> handler.uninstall() >>> handler.uninstall()
>>> fs.load('\0'*8, '') # doctest: +ELLIPSIS >>> fs.load(b'\0'*8, b'') # doctest: +ELLIPSIS
Traceback (most recent call last): Traceback (most recent call last):
... ...
ValueError: ... ValueError: ...
...@@ -645,7 +644,7 @@ def pack_with_open_blob_files(): ...@@ -645,7 +644,7 @@ def pack_with_open_blob_files():
>>> conn1.root()[1] = ZODB.blob.Blob() >>> conn1.root()[1] = ZODB.blob.Blob()
>>> conn1.add(conn1.root()[1]) >>> conn1.add(conn1.root()[1])
>>> with conn1.root()[1].open('w') as file: >>> with conn1.root()[1].open('w') as file:
... file.write('some data') ... _ = file.write(b'some data')
>>> tm1.commit() >>> tm1.commit()
>>> tm2 = transaction.TransactionManager() >>> tm2 = transaction.TransactionManager()
...@@ -654,7 +653,7 @@ def pack_with_open_blob_files(): ...@@ -654,7 +653,7 @@ def pack_with_open_blob_files():
>>> conn1.root()[2] = ZODB.blob.Blob() >>> conn1.root()[2] = ZODB.blob.Blob()
>>> conn1.add(conn1.root()[2]) >>> conn1.add(conn1.root()[2])
>>> with conn1.root()[2].open('w') as file: >>> with conn1.root()[2].open('w') as file:
... file.write('some more data') ... _ = file.write(b'some more data')
>>> db.pack() >>> db.pack()
>>> f.read() >>> f.read()
...@@ -682,7 +681,8 @@ def test_suite(): ...@@ -682,7 +681,8 @@ def test_suite():
suite.addTest(unittest.makeSuite(klass, "check")) suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite( suite.addTest(doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory, setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown)) tearDown=zope.testing.setupstack.tearDown,
checker=ZODB.tests.util.checker))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite( suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'BlobFileStorage', 'BlobFileStorage',
lambda name, blob_dir: lambda name, blob_dir:
......
This diff is collapsed.
...@@ -54,6 +54,8 @@ checker = renormalizing.RENormalizing([ ...@@ -54,6 +54,8 @@ checker = renormalizing.RENormalizing([
r"InvalidObjectReference"), r"InvalidObjectReference"),
(re.compile("ZODB.POSException.ReadOnlyHistoryError"), (re.compile("ZODB.POSException.ReadOnlyHistoryError"),
r"ReadOnlyHistoryError"), r"ReadOnlyHistoryError"),
(re.compile("ZODB.POSException.Unsupported"),
r"Unsupported"),
(re.compile("ZConfig.ConfigurationSyntaxError"), (re.compile("ZConfig.ConfigurationSyntaxError"),
r"ConfigurationSyntaxError"), r"ConfigurationSyntaxError"),
]) ])
......
...@@ -16,6 +16,7 @@ deps = ...@@ -16,6 +16,7 @@ deps =
zdaemon zdaemon
zope.interface zope.interface
zope.testing zope.testing
zope.testrunner
[testenv:coverage] [testenv:coverage]
basepython = basepython =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment