Commit 326e9f38 authored by Stephan Richter's avatar Stephan Richter

Okay, the test failures are now down to 19/1379. The left-over errors are

largely due to:

* Different pickle sizes in Python 3
  (even for protocol 1, where new padding is added, as far as I can tell).

* Weak references do not seem to work correctly.

* When all tests are run, committing transactions fails. Probably some
  tests are not cleaning up enough after themselves.

The biggest issue remaining at this point is the fork of Python 3.3's
pickle to properly load Python 2 binary strings and adding back
``noload()`` to the unpickler.
parent f4bfe178
......@@ -39,6 +39,13 @@ except ImportError:
# Py3
import pickle
# Py3: Python 3's `hasattr()` only swallows AttributeError.
def py2_hasattr(obj, name):
    """Return True if *obj* has an attribute *name*, swallowing any error.

    Python 3's ``hasattr()`` only swallows AttributeError, while Python 2's
    swallowed every exception raised by the attribute lookup (e.g. from a
    property getter).  This helper restores the permissive Python 2
    semantics for code that relies on them.

    Note: we deliberately catch ``Exception`` rather than using a bare
    ``except:`` so that process-control exceptions such as
    ``KeyboardInterrupt`` and ``SystemExit`` still propagate.
    """
    try:
        getattr(obj, name)
    except Exception:
        # Any failure during lookup counts as "attribute not present".
        return False
    return True
log = logging.getLogger("ZODB.BaseStorage")
......@@ -373,7 +380,7 @@ def copy(source, dest, verbose=0):
# using store(). However, if we use store, then
# copyTransactionsFrom() may fail with VersionLockError or
# ConflictError.
restoring = hasattr(dest, 'restore')
restoring = py2_hasattr(dest, 'restore')
fiter = source.iterator()
for transaction in fiter:
tid = transaction.tid
......
......@@ -996,7 +996,7 @@ class FileStorage(
raise UndoError("non-undoable transaction")
if failures:
raise MultipleUndoErrors(failures.items())
raise MultipleUndoErrors(list(failures.items()))
return tindex
......
......@@ -563,7 +563,7 @@ class FileStoragePacker(FileStorageFormatter):
# Update the header to reflect current information, then write
# it to the output file.
if data is None:
data = ""
data = b''
h.prev = 0
h.back = 0
h.plen = len(data)
......
......@@ -187,7 +187,7 @@ class Blob(persistent.Persistent):
self._create_uncommitted_file()
result = BlobFile(self._p_blob_uncommitted, mode, self)
if self._p_blob_committed:
utils.cp(open(self._p_blob_committed), result)
utils.cp(open(self._p_blob_committed, 'rb'), result)
if mode == 'r+':
result.seek(0)
else:
......@@ -863,7 +863,7 @@ class BlobStorage(BlobStorageMixin):
data, serial_before, serial_after = load_result
orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
orig = open(orig_fn, "r")
orig = open(orig_fn, "rb")
new = open(new_fn, "wb")
utils.cp(orig, new)
orig.close()
......
......@@ -5,7 +5,12 @@ from __future__ import print_function
import pickle
import sys
from ZODB.FileStorage import FileStorage
from cStringIO import StringIO
try:
from cStringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import BytesIO
class FakeError(Exception):
def __init__(self, module, name):
......@@ -96,7 +101,7 @@ def analyze_trans(report, txn):
def get_type(record):
try:
unpickled = FakeUnpickler(StringIO(record.data)).load()
unpickled = FakeUnpickler(BytesIO(record.data)).load()
except FakeError as err:
return "%s.%s" % (err.module, err.name)
except:
......
......@@ -890,8 +890,8 @@ class MonteCarloTests(unittest.TestCase):
self._callRepozoMain(argv)
# check restored file content is equal to file that was backed up
f = file(correctpath, 'rb')
g = file(restoredfile, 'rb')
f = open(correctpath, 'rb')
g = open(restoredfile, 'rb')
fguts = f.read()
gguts = g.read()
f.close()
......
......@@ -264,7 +264,7 @@ class ObjectWriter:
...
InvalidObjectReference:
('Attempt to store an object from a foreign database connection',
<ZODB.serialize.DummyJar instance at ...>, P(bob))
<ZODB.serialize.DummyJar ...>, P(bob))
Constructor arguments used by __new__(), as returned by
__getnewargs__(), can affect memory allocation, but may also
......
......@@ -45,10 +45,10 @@ class FileStorageCorruptTests(StorageTestBase):
# truncate the index file
self.failUnless(os.path.exists('Data.fs.index'))
f = open('Data.fs.index', 'r+')
f = open('Data.fs.index', 'rb+')
f.seek(0, 2)
size = f.tell()
f.seek(size / 2)
f.seek(size // 2)
f.truncate()
f.close()
......@@ -62,10 +62,10 @@ class FileStorageCorruptTests(StorageTestBase):
# truncate the index file
self.failUnless(os.path.exists('Data.fs.index'))
size = os.stat('Data.fs.index')[stat.ST_SIZE]
f = open('Data.fs.index', 'r+')
f = open('Data.fs.index', 'rb+')
while f.tell() < size:
f.seek(random.randrange(1, size / 10), 1)
f.write('\000')
f.seek(random.randrange(1, size // 10), 1)
f.write(b'\000')
f.close()
self._storage = ZODB.FileStorage.FileStorage('Data.fs')
......
......@@ -12,7 +12,7 @@ A create_storage function is provided that creates a storage.
>>> db = ZODB.DB(storage)
>>> conn = db.open()
>>> conn.root()[0] = conn.root().__class__()
>>> conn.root()[1] = ZODB.blob.Blob('some data')
>>> conn.root()[1] = ZODB.blob.Blob(b'some data')
>>> transaction.commit()
>>> oid0 = conn.root()[0]._p_oid
>>> oid1 = conn.root()[1]._p_oid
......
......@@ -240,9 +240,9 @@ class IteratorDeepCompare:
# meaning they were the same length.
# Additionally, check that we're backwards compatible to the
# IndexError we used to raise before.
self.assertRaises(StopIteration, itxn1.next)
self.assertRaises(StopIteration, itxn2.next)
self.assertRaises(StopIteration, next, itxn1)
self.assertRaises(StopIteration, next, itxn2)
# Make sure there are no more records left in txn1 and txn2, meaning
# they were the same length
self.assertRaises(StopIteration, iter1.next)
self.assertRaises(StopIteration, iter2.next)
self.assertRaises(StopIteration, next, iter1)
self.assertRaises(StopIteration, next, iter2)
......@@ -783,4 +783,5 @@ def IExternalGC_suite(factory):
return doctest.DocFileSuite(
'IExternalGC.test',
setUp=setup, tearDown=zope.testing.setupstack.tearDown)
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
checker=ZODB.tests.util.checker)
......@@ -50,7 +50,7 @@ class ReadOnlyStorage:
self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)
self.assertRaises(ReadOnlyError, self._storage.store,
'\000' * 8, None, '', '', t)
b'\000' * 8, None, b'', '', t)
self.assertRaises(ReadOnlyError, self._storage.undo,
'\000' * 8, t)
b'\000' * 8, t)
......@@ -226,7 +226,7 @@ class StorageTestBase(ZODB.tests.util.TestCase):
vote_result = self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
if expected_oids is not None:
oids = undo_result and undo_result[1] or []
oids = list(undo_result[1]) if undo_result else []
oids.extend(oid for (oid, _) in vote_result or ())
self.assertEqual(len(oids), len(expected_oids), repr(oids))
for oid in expected_oids:
......
......@@ -44,12 +44,11 @@ def snooze():
time.sleep(0.1)
def listeq(L1, L2):
"""Return True if L1.sort() == L2.sort()"""
c1 = L1[:]
c2 = L2[:]
c1.sort()
c2.sort()
return c1 == c2
"""Return True if L1.sort() == L2.sort()
Also support iterators.
"""
return sorted(L1) == sorted(L2)
class TransactionalUndoStorage:
......@@ -59,7 +58,7 @@ class TransactionalUndoStorage:
def _transaction_store(self, oid, rev, data, vers, trans):
r = self._storage.store(oid, rev, data, vers, trans)
if r:
if type(r) == str:
if isinstance(r, bytes):
self.__serials[oid] = r
else:
for oid, serial in r:
......@@ -432,7 +431,7 @@ class TransactionalUndoStorage:
# record by packing.
# Add a few object revisions
oid = '\0'*8
oid = b'\0'*8
revid0 = self._dostore(oid, data=MinPO(50))
revid1 = self._dostore(oid, revid=revid0, data=MinPO(51))
snooze()
......@@ -492,14 +491,14 @@ class TransactionalUndoStorage:
log = self._storage.undoLog()
eq(len(log), 4)
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3',
'o1 -> o2', 'initial database creation')):
for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3',
b'o1 -> o2', b'initial database creation')):
eq(entry[0]['description'], entry[1])
self._storage.pack(packtime, referencesf)
log = self._storage.undoLog()
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3')):
for entry in zip(log, (b'o1 -> o3', b'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1])
tid = log[0]['id']
......@@ -511,7 +510,7 @@ class TransactionalUndoStorage:
conn.sync()
log = self._storage.undoLog()
for entry in zip(log, ('undo', 'o1 -> o3', 'o1 -> o2 -> o3')):
for entry in zip(log, (b'undo', b'o1 -> o3', b'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1])
eq(o1.obj, o2)
......@@ -703,13 +702,13 @@ class TransactionalUndoStorage:
L2.sort()
eq(L1, L2)
self.assertRaises(StopIteration, transactions.next)
self.assertRaises(StopIteration, next, transactions)
def checkUndoLogMetadata(self):
# test that the metadata is correct in the undo log
t = transaction.get()
t.note('t1')
t.setExtendedInfo('k2','this is transaction metadata')
t.setExtendedInfo('k2', 'this is transaction metadata')
t.setUser('u3',path='p3')
db = DB(self._storage)
conn = db.open()
......@@ -721,9 +720,9 @@ class TransactionalUndoStorage:
l = self._storage.undoLog()
self.assertEqual(len(l),2)
d = l[0]
self.assertEqual(d['description'],'t1')
self.assertEqual(d['k2'],'this is transaction metadata')
self.assertEqual(d['user_name'],'p3 u3')
self.assertEqual(d['description'], b't1')
self.assertEqual(d['k2'], 'this is transaction metadata')
self.assertEqual(d['user_name'], b'p3 u3')
# A common test body for index tests on undoInfo and undoLog. Before
# ZODB 3.4, they always returned a wrong number of results (one too
......
......@@ -9,7 +9,7 @@ with some data:
>>> import transaction
>>> blob = Blob()
>>> data = blob.open("w")
>>> data.write("I'm a happy Blob.")
>>> _ = data.write(b"I'm a happy Blob.")
>>> data.close()
We also need a database with a blob supporting storage. (We're going to use
......@@ -52,7 +52,7 @@ MVCC also works.
>>> transaction3 = transaction.TransactionManager()
>>> connection3 = database.open(transaction_manager=transaction3)
>>> f = connection.root()['myblob'].open('w')
>>> f.write('I am an ecstatic Blob.')
>>> _ = f.write(b'I am an ecstatic Blob.')
>>> f.close()
>>> transaction.commit()
>>> connection3.root()['myblob'].open('r').read()
......
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Import/export support for blob data
===================================
......@@ -34,12 +20,14 @@ Put a couple blob objects in our database1 and on the filesystem:
>>> import time, os
>>> nothing = transaction.begin()
>>> data1 = 'x'*100000
>>> data1 = b'x'*100000
>>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write(data1)
>>> data2 = 'y'*100000
>>> with blob1.open('w') as file:
... _ = file.write(data1)
>>> data2 = b'y'*100000
>>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write(data2)
>>> with blob2.open('w') as file:
... _ = file.write(data2)
>>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
>>> root1['blobdata'] = d
>>> transaction.commit()
......
......@@ -34,32 +34,37 @@ Put some revisions of a blob object in our database and on the filesystem:
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> blob = Blob()
>>> blob.open('w').write('this is blob data 0')
>>> with blob.open('w') as file:
... _ = file.write(b'this is blob data 0')
>>> root['blob'] = blob
>>> transaction.commit()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 1')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 1')
>>> transaction.commit()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 2')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 2')
>>> transaction.commit()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 3')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 3')
>>> transaction.commit()
>>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 4')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 4')
>>> transaction.commit()
>>> tids.append(blob._p_serial)
......
##############################################################################
#
# Copyright (c) 2005-2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Transaction support for Blobs
=============================
......@@ -27,7 +13,8 @@ We need a database with a blob supporting storage::
Putting a Blob into a Connection works like any other Persistent object::
>>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write('this is blob 1')
>>> with blob1.open('w') as file:
... _ = file.write(b'this is blob 1')
>>> root1['blob1'] = blob1
>>> 'blob1' in root1
True
......@@ -62,7 +49,8 @@ state:
False
>>> blob1._p_blob_uncommitted
>>> blob1.open('w').write('this is new blob 1')
>>> with blob1.open('w') as file:
... _ = file.write(b'this is new blob 1')
>>> blob1.open().read()
'this is new blob 1'
>>> fname = blob1._p_blob_uncommitted
......@@ -120,14 +108,15 @@ when we start)::
>>> blob1afh3 = blob1a.open('a')
>>> bool(blob1a._p_changed)
True
>>> blob1afh3.write('woot!')
>>> _ = blob1afh3.write(b'woot!')
>>> blob1afh3.close()
We can open more than one blob object during the course of a single
transaction::
>>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write('this is blob 3')
>>> with blob2.open('w') as file:
... _ = file.write(b'this is blob 3')
>>> root2['blob2'] = blob2
>>> transaction.commit()
......@@ -151,7 +140,7 @@ moment)::
>>> transaction.commit()
Traceback (most recent call last):
...
TypeError: coercing to Unicode: need string or buffer, BlobFile found
TypeError: ...
Abort for good measure::
......@@ -166,8 +155,8 @@ connections should result in a write conflict error::
>>> root4 = database.open(transaction_manager=tm2).root()
>>> blob1c3 = root3['blob1']
>>> blob1c4 = root4['blob1']
>>> blob1c3fh1 = blob1c3.open('a').write('this is from connection 3')
>>> blob1c4fh1 = blob1c4.open('a').write('this is from connection 4')
>>> blob1c3fh1 = blob1c3.open('a').write(b'this is from connection 3')
>>> blob1c4fh1 = blob1c4.open('a').write(b'this is from connection 4')
>>> tm1.commit()
>>> root3['blob1'].open('r').read()
'this is blob 1woot!this is from connection 3'
......@@ -215,14 +204,14 @@ We do support optimistic savepoints:
>>> root5 = connection5.root()
>>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.")
>>> _ = blob_fh.write(b"I'm a happy blob.")
>>> blob_fh.close()
>>> root5['blob'] = blob
>>> transaction.commit()
>>> root5['blob'].open("r").read()
"I'm a happy blob."
>>> blob_fh = root5['blob'].open("a")
>>> blob_fh.write(" And I'm singing.")
>>> _ = blob_fh.write(b" And I'm singing.")
>>> blob_fh.close()
>>> root5['blob'].open("r").read()
"I'm a happy blob. And I'm singing."
......@@ -248,7 +237,8 @@ the committed location again:
We support non-optimistic savepoints too:
>>> root5['blob'].open("a").write(" And I'm dancing.")
>>> with root5['blob'].open("a") as file:
... _ = file.write(b" And I'm dancing.")
>>> root5['blob'].open("r").read()
"I'm a happy blob. And I'm singing. And I'm dancing."
>>> savepoint = transaction.savepoint()
......@@ -259,7 +249,8 @@ Again, the savepoint creates a new savepoints directory:
... if name.startswith('savepoint')])
1
>>> root5['blob'].open("w").write(" And the weather is beautiful.")
>>> with root5['blob'].open("w") as file:
... _ = file.write(b" And the weather is beautiful.")
>>> savepoint.rollback()
>>> root5['blob'].open("r").read()
......@@ -283,7 +274,7 @@ file that can be opened.
>>> root6 = connection6.root()
>>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.")
>>> _ = blob_fh.write(b"I'm a happy blob.")
>>> blob_fh.close()
>>> root6['blob'] = blob
>>> transaction.commit()
......@@ -296,12 +287,13 @@ We can also read committed data by calling open with a 'c' flag:
This just returns a regular file object:
>>> type(f)
<type 'file'>
>>> type(f) == file_type
True
and doesn't prevent us from opening the blob for writing:
>>> blob.open('w').write('x')
>>> with blob.open('w') as file:
... _ = file.write(b'x')
>>> blob.open().read()
'x'
......@@ -325,7 +317,8 @@ uncommitted changes:
...
BlobError: Uncommitted changes
>>> blob.open('w').write("I'm a happy blob.")
>>> with blob.open('w') as file:
... _ = file.write(b"I'm a happy blob.")
>>> root6['blob6'] = blob
>>> blob.committed()
Traceback (most recent call last):
......@@ -354,10 +347,12 @@ uncommitted changes:
You can't open a committed blob file for writing:
>>> open(blob.committed(), 'w') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IOError: ...
>>> try:
... open(blob.committed(), 'w') # doctest: +ELLIPSIS
... except:
... # Produces IOError in Py2 and PermissionError in Py3
... print('Error raised.')
Error raised.
tpc_abort
---------
......@@ -368,11 +363,13 @@ stored are discarded.
>>> olddata, oldserial = blob_storage.load(blob._p_oid, '')
>>> t = transaction.get()
>>> blob_storage.tpc_begin(t)
>>> open('blobfile', 'w').write('This data should go away')
>>> with open('blobfile', 'wb') as file:
... _ = file.write(b'This data should go away')
>>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile',
... '', t)
>>> new_oid = blob_storage.new_oid()
>>> open('blobfile2', 'w').write('This data should go away too')
>>> with open('blobfile2', 'wb') as file:
... _ = file.write(b'This data should go away too')
>>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2',
... '', t)
......
......@@ -187,10 +187,10 @@ class FileStorageTests(
# of the largest oid in use.
t = transaction.Transaction()
self._storage.tpc_begin(t)
giant_oid = '\xee' * 8
giant_oid = b'\xee' * 8
# Store an object.
# oid, serial, data, version, transaction
r1 = self._storage.store(giant_oid, '\0'*8, 'data', '', t)
r1 = self._storage.store(giant_oid, b'\0'*8, b'data', b'', t)
# Finish the transaction.
r2 = self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
......@@ -204,10 +204,10 @@ class FileStorageTests(
# ZRS recovery, use the .restore() method, this is plain critical.
t = transaction.Transaction()
self._storage.tpc_begin(t)
giant_oid = '\xee' * 8
giant_oid = b'\xee' * 8
# Store an object.
# oid, serial, data, version, prev_txn, transaction
r1 = self._storage.restore(giant_oid, '\0'*8, 'data', '', None, t)
r1 = self._storage.restore(giant_oid, b'\0'*8, b'data', b'', None, t)
# Finish the transaction.
r2 = self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
......@@ -286,7 +286,7 @@ class FileStorageTests(
expected_data, expected_tid = self._storage.load(oid, '')
self.assertEqual(expected_data, data)
self.assertEqual(expected_tid, tid)
if x == '\002':
if x == b'\002':
self.assertEqual(next_oid, None)
else:
self.assertNotEqual(next_oid, None)
......@@ -374,13 +374,13 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True)
def checkanalyze(self):
import new, sys, pickle
import types, sys, pickle
from BTrees.OOBTree import OOBTree
from ZODB.scripts import analyze
# Set up a module to act as a broken import
module_name = 'brokenmodule'
module = new.module(module_name)
module = types.ModuleType(module_name)
sys.modules[module_name] = module
class Broken(MinPO):
......@@ -426,8 +426,7 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
analyze.analyze_trans(rep, txn)
# from ZODB.scripts.analyze.report
typemap = rep.TYPEMAP.keys()
typemap.sort()
typemap = sorted(rep.TYPEMAP.keys())
cumpct = 0.0
for t in typemap:
pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
......@@ -605,19 +604,19 @@ def deal_with_finish_failures():
>>> import zope.testing.loggingsupport
>>> handler = zope.testing.loggingsupport.InstalledHandler(
... 'ZODB.FileStorage')
>>> transaction.commit()
>>> transaction.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: <lambda>() takes no arguments (1 given)
TypeError: <lambda>() takes ...
>>> print handler
>>> print(handler)
ZODB.FileStorage CRITICAL
Failure in _finish. Closing.
>>> handler.uninstall()
>>> fs.load('\0'*8, '') # doctest: +ELLIPSIS
>>> fs.load(b'\0'*8, b'') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
......@@ -645,7 +644,7 @@ def pack_with_open_blob_files():
>>> conn1.root()[1] = ZODB.blob.Blob()
>>> conn1.add(conn1.root()[1])
>>> with conn1.root()[1].open('w') as file:
... file.write('some data')
... _ = file.write(b'some data')
>>> tm1.commit()
>>> tm2 = transaction.TransactionManager()
......@@ -654,7 +653,7 @@ def pack_with_open_blob_files():
>>> conn1.root()[2] = ZODB.blob.Blob()
>>> conn1.add(conn1.root()[2])
>>> with conn1.root()[2].open('w') as file:
... file.write('some more data')
... _ = file.write(b'some more data')
>>> db.pack()
>>> f.read()
......@@ -682,7 +681,8 @@ def test_suite():
suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown))
tearDown=zope.testing.setupstack.tearDown,
checker=ZODB.tests.util.checker))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'BlobFileStorage',
lambda name, blob_dir:
......
......@@ -11,7 +11,6 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from pickle import Pickler, Unpickler
from ZODB.blob import Blob
from ZODB.DB import DB
......@@ -47,6 +46,13 @@ except ImportError:
# Py3
from io import BytesIO
try:
file_type = file
except NameError:
# Py3: Python 3 does not have a file type.
import io
file_type = io.BufferedReader
def new_time():
"""Create a _new_ time stamp.
......@@ -163,20 +169,20 @@ class BlobUndoTests(BlobTestBase):
transaction.begin()
blob = Blob()
with blob.open('w') as file:
file.write('this is state 1')
file.write(b'this is state 1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
with blob.open('w') as file:
file.write('this is state 2')
file.write(b'this is state 2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), 'this is state 1')
self.assertEqual(blob.open('r').read(), b'this is state 1')
database.close()
......@@ -185,8 +191,8 @@ class BlobUndoTests(BlobTestBase):
connection = database.open()
root = connection.root()
transaction.begin()
with open('consume1', 'w') as file:
file.write('this is state 1')
with open('consume1', 'wb') as file:
file.write(b'this is state 1')
blob = Blob()
blob.consumeFile('consume1')
root['blob'] = blob
......@@ -194,15 +200,15 @@ class BlobUndoTests(BlobTestBase):
transaction.begin()
blob = root['blob']
with open('consume2', 'w') as file:
file.write('this is state 2')
with open('consume2', 'wb') as file:
file.write(b'this is state 2')
blob.consumeFile('consume2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), 'this is state 1')
self.assertEqual(blob.open('r').read(), b'this is state 1')
database.close()
......@@ -214,25 +220,25 @@ class BlobUndoTests(BlobTestBase):
transaction.begin()
with blob.open('w') as file:
file.write('this is state 1')
file.write(b'this is state 1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
with blob.open('w') as file:
file.write('this is state 2')
file.write(b'this is state 2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), 'this is state 1')
self.assertEqual(blob.open('r').read(), b'this is state 1')
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), 'this is state 2')
self.assertEqual(blob.open('r').read(), b'this is state 2')
database.close()
......@@ -244,7 +250,7 @@ class BlobUndoTests(BlobTestBase):
transaction.begin()
with blob.open('w') as file:
file.write('this is state 1')
file.write(b'this is state 1')
root['blob'] = blob
transaction.commit()
......@@ -256,7 +262,7 @@ class BlobUndoTests(BlobTestBase):
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
self.assertEqual(blob.open('r').read(), 'this is state 1')
self.assertEqual(blob.open('r').read(), b'this is state 1')
database.close()
......@@ -283,19 +289,19 @@ class RecoveryBlobStorage(BlobTestBase,
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
with conn.root()[2].open('w') as file:
file.write('some data')
file.write(b'some data')
transaction.commit()
conn.root()[3] = ZODB.blob.Blob()
with conn.root()[3].open('w') as file:
file.write(
(''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
(b''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
for i in range(random.randint(10000,20000)))
)[:-random.randint(1,4)]
)
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
with conn.root()[2].open('w') as file:
file.write('some other data')
file.write(b'some other data')
transaction.commit()
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
......@@ -305,7 +311,7 @@ def gc_blob_removes_uncommitted_data():
"""
>>> blob = Blob()
>>> with blob.open('w') as file:
... file.write('x')
... _ = file.write(b'x')
>>> fname = blob._p_blob_uncommitted
>>> os.path.exists(fname)
True
......@@ -341,7 +347,7 @@ def commit_from_wrong_partition():
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> with root['blob'].open('w') as file:
... file.write('test')
... _ = file.write(b'test')
>>> transaction.commit() # doctest: +ELLIPSIS
Copied blob file ...
......@@ -352,7 +358,7 @@ Works with savepoints too:
>>> root['blob2'] = Blob()
>>> with root['blob2'].open('w') as file:
... file.write('test2')
... _ = file.write(b'test2')
>>> _ = transaction.savepoint() # doctest: +ELLIPSIS
Copied blob file ...
......@@ -392,7 +398,7 @@ def packing_with_uncommitted_data_non_undoing():
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> with root['blob'].open('w') as file:
... file.write('test')
... _ = file.write(b'test')
>>> blob_storage.pack(new_time(), referencesf)
......@@ -420,7 +426,7 @@ def packing_with_uncommitted_data_undoing():
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> with root['blob'].open('w') as file:
... file.write('test')
... _ = file.write(b'test')
>>> blob_storage.pack(new_time(), referencesf)
......@@ -447,10 +453,10 @@ def secure_blob_directory():
They are only accessible by the owner:
>>> oct(os.stat('blobs').st_mode)
'040700'
>>> oct(os.stat(tmp_dir).st_mode)
'040700'
>>> oct(os.stat('blobs').st_mode)[-5:]
'40700'
>>> oct(os.stat(tmp_dir).st_mode)[-5:]
'40700'
These settings are recognized as secure:
......@@ -462,7 +468,7 @@ def secure_blob_directory():
After making the permissions of tmp_dir more liberal, the directory is
recognized as insecure:
>>> os.chmod(tmp_dir, 040711)
>>> os.chmod(tmp_dir, 0o40711)
>>> blob_storage.fshelper.isSecure(tmp_dir)
False
......@@ -493,7 +499,7 @@ def loadblob_tmpstore():
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> with root['blob'].open('w') as file:
... file.write('test')
... _ = file.write(b'test')
>>> import transaction
>>> transaction.commit()
>>> blob_oid = root['blob']._p_oid
......@@ -531,11 +537,11 @@ def is_blob_record():
An invalid pickle yields a false value:
>>> ZODB.blob.is_blob_record("Hello world!")
>>> ZODB.blob.is_blob_record(b"Hello world!")
False
>>> ZODB.blob.is_blob_record('c__main__\nC\nq\x01.')
>>> ZODB.blob.is_blob_record(b'c__main__\nC\nq\x01.')
False
>>> ZODB.blob.is_blob_record('cWaaaa\nC\nq\x01.')
>>> ZODB.blob.is_blob_record(b'cWaaaa\nC\nq\x01.')
False
As does None, which may occur in delete records:
......@@ -556,7 +562,7 @@ def do_not_depend_on_cwd():
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> with conn.root()['blob'].open('w') as file:
... file.write('data')
... _ = file.write(b'data')
>>> transaction.commit()
>>> os.chdir(here)
>>> conn.root()['blob'].open().read()
......@@ -571,15 +577,15 @@ def savepoint_isolation():
>>> bs = create_storage()
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob('initial')
>>> conn.root.b = ZODB.blob.Blob(b'initial')
>>> transaction.commit()
>>> with conn.root.b.open('w') as file:
... file.write('1')
... _ = file.write(b'1')
>>> _ = transaction.savepoint()
>>> tm = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm)
>>> with conn2.root.b.open('w') as file:
... file.write('2')
... _ = file.write(b'2')
>>> _ = tm.savepoint()
>>> conn.root.b.open().read()
'1'
......@@ -601,16 +607,16 @@ def savepoint_commits_without_invalidations_out_of_order():
>>> db = DB(bs)
>>> tm1 = transaction.TransactionManager()
>>> conn1 = db.open(transaction_manager=tm1)
>>> conn1.root.b = ZODB.blob.Blob('initial')
>>> conn1.root.b = ZODB.blob.Blob(b'initial')
>>> tm1.commit()
>>> with conn1.root.b.open('w') as file:
... file.write('1')
... _ = file.write(b'1')
>>> _ = tm1.savepoint()
>>> tm2 = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm2)
>>> with conn2.root.b.open('w') as file:
... file.write('2')
... _ = file.write(b'2')
>>> _ = tm1.savepoint()
>>> conn1.root.b.open().read()
'1'
......@@ -635,17 +641,17 @@ def savepoint_cleanup():
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob('initial')
>>> conn.root.b = ZODB.blob.Blob(b'initial')
>>> _ = transaction.savepoint()
>>> len(os.listdir(tdir))
1
>>> transaction.abort()
>>> os.listdir(tdir)
[]
>>> conn.root.b = ZODB.blob.Blob('initial')
>>> conn.root.b = ZODB.blob.Blob(b'initial')
>>> transaction.commit()
>>> with conn.root.b.open('w') as file:
... file.write('1')
... _ = file.write(b'1')
>>> _ = transaction.savepoint()
>>> transaction.abort()
>>> os.listdir(tdir)
......@@ -656,7 +662,7 @@ def savepoint_cleanup():
def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
r"""
>>> conn = ZODB.connection('data.fs', blob_dir='blobs')
>>> blob = ZODB.blob.Blob('blah')
>>> blob = ZODB.blob.Blob(b'blah')
>>> conn.add(blob)
>>> transaction.commit()
>>> old_serial = blob._p_serial
......@@ -703,13 +709,14 @@ def storage_reusable_suite(prefix, factory,
return factory(name, blob_dir)
test.globs['create_storage'] = create_storage
test.globs['file_type'] = file_type
suite = unittest.TestSuite()
suite.addTest(doctest.DocFileSuite(
"blob_connection.txt", "blob_importexport.txt",
"blob_transaction.txt",
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
optionflags=doctest.ELLIPSIS,
optionflags=doctest.ELLIPSIS, checker=ZODB.tests.util.checker
))
if test_packing:
suite.addTest(doctest.DocFileSuite(
......@@ -718,7 +725,8 @@ def storage_reusable_suite(prefix, factory,
))
suite.addTest(doctest.DocTestSuite(
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
checker = zope.testing.renormalizing.RENormalizing([
checker = ZODB.tests.util.checker + \
zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
]),
......
......@@ -54,6 +54,8 @@ checker = renormalizing.RENormalizing([
r"InvalidObjectReference"),
(re.compile("ZODB.POSException.ReadOnlyHistoryError"),
r"ReadOnlyHistoryError"),
(re.compile("ZODB.POSException.Unsupported"),
r"Unsupported"),
(re.compile("ZConfig.ConfigurationSyntaxError"),
r"ConfigurationSyntaxError"),
])
......
......@@ -16,6 +16,7 @@ deps =
zdaemon
zope.interface
zope.testing
zope.testrunner
[testenv:coverage]
basepython =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment