Commit 266cadf9 authored by Stephan Richter's avatar Stephan Richter

Fixed up as many unit tests as I could. There are between 13-16

failures left.
parent de1ed537
......@@ -21,6 +21,7 @@ from __future__ import print_function
import threading
import time
import logging
import sys
from struct import pack as _structpack, unpack as _structunpack
import zope.interface
......@@ -41,7 +42,6 @@ except ImportError:
log = logging.getLogger("ZODB.BaseStorage")
import sys
class BaseStorage(UndoLogCompatible):
......
......@@ -13,30 +13,31 @@
##############################################################################
import logging
import sys
from pickle import PicklingError
import six
import zope.interface
from ZODB.POSException import ConflictError
from ZODB.loglevels import BLATHER
from ZODB.serialize import _protocol
from ZODB.serialize import _protocol, _Unpickler
try:
from cStringIO import StringIO
from cStringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import StringIO
from io import BytesIO
try:
from cPickle import Unpickler, Pickler
from cPickle import Pickler
except ImportError:
# Py3
from pickle import Unpickler, Pickler
from pickle import Pickler
logger = logging.getLogger('ZODB.ConflictResolution')
ResolvedSerial = 'rs'
ResolvedSerial = b'rs'
class BadClassName(Exception):
pass
......@@ -69,8 +70,8 @@ def find_global(*args):
if cls == 1:
# Not importable
if (isinstance(args, tuple) and len(args) == 2 and
isinstance(args[0], basestring) and
isinstance(args[1], basestring)
isinstance(args[0], six.string_types) and
isinstance(args[1], six.string_types)
):
return BadClass(*args)
else:
......@@ -80,8 +81,8 @@ def find_global(*args):
def state(self, oid, serial, prfactory, p=''):
p = p or self.loadSerial(oid, serial)
p = self._crs_untransform_record_data(p)
file = StringIO(p)
unpickler = Unpickler(file)
file = BytesIO(p)
unpickler = _Unpickler(file)
unpickler.find_global = find_global
unpickler.persistent_load = prfactory.persistent_load
unpickler.load() # skip the class tuple
......@@ -221,13 +222,13 @@ def persistent_id(object):
_unresolvable = {}
def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
committedData=''):
committedData=b''):
# class_tuple, old, committed, newstate = ('',''), 0, 0, 0
try:
prfactory = PersistentReferenceFactory()
newpickle = self._crs_untransform_record_data(newpickle)
file = StringIO(newpickle)
unpickler = Unpickler(file)
file = BytesIO(newpickle)
unpickler = _Unpickler(file)
unpickler.find_global = find_global
unpickler.persistent_load = prfactory.persistent_load
meta = unpickler.load()
......@@ -269,7 +270,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
resolved = resolve(old, committed, newstate)
file = StringIO()
file = BytesIO()
pickler = Pickler(file, _protocol)
if sys.version_info[0] < 3:
pickler.inst_persistent_id = persistent_id
......@@ -277,7 +278,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
pickler.persistent_id = persistent_id
pickler.dump(meta)
pickler.dump(resolved)
return self._crs_transform_record_data(file.getvalue(1))
return self._crs_transform_record_data(file.getvalue())
except (ConflictError, BadClassName):
pass
except:
......
......@@ -86,7 +86,7 @@ use:
.. -> src
>>> import ConflictResolution_txt
>>> exec src in ConflictResolution_txt.__dict__
>>> exec(src, ConflictResolution_txt.__dict__)
>>> PCounter = ConflictResolution_txt.PCounter
>>> PCounter.__module__ = 'ConflictResolution_txt'
......@@ -198,7 +198,7 @@ Here's an example of a broken _p_resolveConflict method::
.. -> src
>>> exec src in ConflictResolution_txt.__dict__
>>> exec(src, ConflictResolution_txt.__dict__)
>>> PCounter2 = ConflictResolution_txt.PCounter2
>>> PCounter2.__module__ = 'ConflictResolution_txt'
......@@ -293,11 +293,11 @@ and newState [#get_persistent_reference]_. They have an oid, `weak` is
False, and `database_name` is None. `klass` happens to be set but this is
not always the case.
>>> isinstance(new.oid, str)
>>> isinstance(new.oid, bytes)
True
>>> new.weak
False
>>> print new.database_name
>>> print(new.database_name)
None
>>> new.klass is PCounter
True
......@@ -431,7 +431,7 @@ integrity issues.
.. -> src
>>> exec src in ConflictResolution_txt.__dict__
>>> exec(src, ConflictResolution_txt.__dict__)
>>> PCounter3 = ConflictResolution_txt.PCounter3
>>> PCounter3.__module__ = 'ConflictResolution_txt'
......@@ -475,47 +475,47 @@ integrity issues.
>>> from ZODB.ConflictResolution import PersistentReference
>>> ref1 = PersistentReference('my_oid')
>>> ref1 = PersistentReference(b'my_oid')
>>> ref1.oid
'my_oid'
>>> print ref1.klass
>>> print(ref1.klass)
None
>>> print ref1.database_name
>>> print(ref1.database_name)
None
>>> ref1.weak
False
>>> ref2 = PersistentReference(('my_oid', 'my_class'))
>>> ref2 = PersistentReference((b'my_oid', 'my_class'))
>>> ref2.oid
'my_oid'
>>> ref2.klass
'my_class'
>>> print ref2.database_name
>>> print(ref2.database_name)
None
>>> ref2.weak
False
>>> ref3 = PersistentReference(['w', ('my_oid',)])
>>> ref3 = PersistentReference(['w', (b'my_oid',)])
>>> ref3.oid
'my_oid'
>>> print ref3.klass
>>> print(ref3.klass)
None
>>> print ref3.database_name
>>> print(ref3.database_name)
None
>>> ref3.weak
True
>>> ref3a = PersistentReference(['w', ('my_oid', 'other_db')])
>>> ref3a = PersistentReference(['w', (b'my_oid', 'other_db')])
>>> ref3a.oid
'my_oid'
>>> print ref3a.klass
>>> print(ref3a.klass)
None
>>> ref3a.database_name
'other_db'
>>> ref3a.weak
True
>>> ref4 = PersistentReference(['m', ('other_db', 'my_oid', 'my_class')])
>>> ref4 = PersistentReference(['m', ('other_db', b'my_oid', 'my_class')])
>>> ref4.oid
'my_oid'
>>> ref4.klass
......@@ -525,22 +525,22 @@ integrity issues.
>>> ref4.weak
False
>>> ref5 = PersistentReference(['n', ('other_db', 'my_oid')])
>>> ref5 = PersistentReference(['n', ('other_db', b'my_oid')])
>>> ref5.oid
'my_oid'
>>> print ref5.klass
>>> print(ref5.klass)
None
>>> ref5.database_name
'other_db'
>>> ref5.weak
False
>>> ref6 = PersistentReference(['my_oid']) # legacy
>>> ref6 = PersistentReference([b'my_oid']) # legacy
>>> ref6.oid
'my_oid'
>>> print ref6.klass
>>> print(ref6.klass)
None
>>> print ref6.database_name
>>> print(ref6.database_name)
None
>>> ref6.weak
True
......
......@@ -48,12 +48,13 @@ from ZODB.POSException import ConflictError, ReadConflictError
from ZODB.POSException import Unsupported, ReadOnlyHistoryError
from ZODB.POSException import POSKeyError
from ZODB.serialize import ObjectWriter, ObjectReader
from ZODB.utils import p64, u64, z64, oid_repr, positive_id, bytes
from ZODB.utils import p64, u64, z64, oid_repr, positive_id
from ZODB import utils
import six
global_reset_counter = 0
def resetCaches():
"""Causes all connection caches to be reset as connections are reopened.
......@@ -494,7 +495,7 @@ class Connection(ExportImport, object):
if invalidated is None:
# special value: the transaction is so old that
# we need to flush the whole cache.
self._cache.invalidate(self._cache.cache_data.keys())
self._cache.invalidate(list(self._cache.cache_data.keys()))
elif invalidated:
self._cache.invalidate(invalidated)
......@@ -1013,7 +1014,7 @@ class Connection(ExportImport, object):
for k,v in items:
del everything[k]
# return a list of [ghosts....not recently used.....recently used]
return everything.items() + items
return list(everything.items()) + items
def open(self, transaction_manager=None, delegate=True):
"""Register odb, the DB that this Connection uses.
......
......@@ -535,9 +535,7 @@ class DB(object):
detail[c] = 1
self._connectionMap(f)
detail = detail.items()
detail.sort()
return detail
return sorted(detail.items())
def cacheExtremeDetail(self):
detail = []
......@@ -611,8 +609,9 @@ class DB(object):
'ngsize': con._cache.cache_non_ghost_count,
'size': len(con._cache)})
self._connectionMap(f)
m.sort()
return m
# Py3: Simulate Python 2 m.sort() functionality.
return sorted(
m, key=lambda x: (x['connection'], x['ngsize'], x['size']))
def close(self):
"""Close the database and its underlying storage.
......@@ -932,7 +931,7 @@ class DB(object):
raise NotImplementedError
if txn is None:
txn = transaction.get()
if isinstance(ids, basestring):
if isinstance(ids, six.string_types):
ids = [ids]
txn.join(TransactionalUndo(self, ids))
......
......@@ -55,8 +55,7 @@ Let's add some data:
>>> db = DB(storage)
>>> conn = db.open()
>>> items = conn.root()['1'].items()
>>> items.sort()
>>> items = sorted(conn.root()['1'].items())
>>> items
[('a', 1), ('b', 2)]
......@@ -106,7 +105,7 @@ the new underlying storages:
The object id of the new object is quite random, and typically large:
>>> print u64(conn.root()['2']._p_oid)
>>> print(u64(conn.root()['2']._p_oid))
3553260803050964942
Let's look at some other methods:
......@@ -201,7 +200,8 @@ DemoStorage supports Blobs if the changes database supports blobs.
>>> db = DB(base)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> conn.root()['blob'].open('w').write('state 1')
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'state 1')
>>> transaction.commit()
>>> db.close()
......@@ -216,7 +216,8 @@ DemoStorage supports Blobs if the changes database supports blobs.
>>> conn.root()['blob'].open().read()
'state 1'
>>> _ = transaction.begin()
>>> conn.root()['blob'].open('w').write('state 2')
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'state 2')
>>> transaction.commit()
>>> conn.root()['blob'].open().read()
'state 2'
......@@ -238,7 +239,8 @@ It isn't necessary for the base database to support blobs.
>>> _ = transaction.begin()
>>> conn.root()['blob2'] = ZODB.blob.Blob()
>>> conn.root()['blob2'].open('w').write('state 1')
>>> with conn.root()['blob2'].open('w') as file:
... _ = file.write(b'state 1')
>>> conn.root()['blob2'].open().read()
'state 1'
......@@ -263,7 +265,8 @@ storage wrapped around it when necessary:
'BlobStorage'
>>> _ = transaction.begin()
>>> conn.root()['blob'].open('w').write('state 2')
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'state 2')
>>> transaction.commit()
>>> conn.root()['blob'].open().read()
'state 2'
......@@ -358,7 +361,7 @@ To test this, we need to hack random.randint a bit.
>>> rv = 42
>>> def faux_randint(min, max):
... print 'called randint'
... print('called randint')
... global rv
... rv += 1000
... return rv
......
......@@ -17,6 +17,7 @@ import os
from tempfile import TemporaryFile
import logging
import six
import sys
from ZODB.blob import Blob
from ZODB.interfaces import IBlobStorage
......
......@@ -26,7 +26,7 @@ from ZODB.FileStorage.fspack import FileStoragePacker
from ZODB.fsIndex import fsIndex
from ZODB import BaseStorage, ConflictResolution, POSException
from ZODB.POSException import UndoError, POSKeyError, MultipleUndoErrors
from ZODB.utils import p64, u64, z64, as_bytes, as_text, bytes
from ZODB.utils import p64, u64, z64, as_bytes, as_text
import binascii
import contextlib
......@@ -48,10 +48,10 @@ except ImportError:
from pickle import Pickler, loads
try:
from base64 import decodestring as decodebytes, encodestring as decodebytes
except ImportError:
# Py3
from base64 import decodebytes, encodebytes
except ImportError:
from base64 import decodestring as decodebytes, encodestring as encodebytes
# Not all platforms have fsync
......@@ -1901,7 +1901,7 @@ class TransactionRecordIterator(FileStorageFormatter):
def __iter__(self):
return self
def next(self):
def __next__(self):
pos = self._pos
while pos < self._tend:
# Read the data records for this transaction
......@@ -1934,6 +1934,8 @@ class TransactionRecordIterator(FileStorageFormatter):
raise StopIteration()
next = __next__
class Record(BaseStorage.DataRecord):
......
......@@ -82,7 +82,7 @@ directory for blobs is kept.)
>>> conn = db.open()
>>> conn.root()[1] = ZODB.blob.Blob()
>>> with conn.root()[1].open('w') as file:
... file.write(b'some data')
... _ = file.write(b'some data')
>>> conn.root()[2] = ZODB.blob.Blob()
>>> with conn.root()[2].open('w') as file:
... _ = file.write(b'some data')
......
......@@ -116,10 +116,7 @@ class ConflictError(POSError, transaction.interfaces.TransientError):
if data is not None:
# avoid circular import chain
from ZODB.utils import get_pickle_metadata
self.class_name = "%s.%s" % get_pickle_metadata(data)
## else:
## if message != "data read conflict error":
## raise RuntimeError
self.class_name = '.'.join(get_pickle_metadata(data))
self.serials = serials
......
......@@ -359,13 +359,13 @@ class FilesystemHelper:
log("Blob temporary directory '%s' does not exist. "
"Created new directory." % self.temp_dir)
if not os.path.exists(os.path.join(self.base_dir, LAYOUT_MARKER)):
layout_marker = open(
os.path.join(self.base_dir, LAYOUT_MARKER), 'wb')
layout_marker.write(utils.as_bytes(self.layout_name))
layout_marker_path = os.path.join(self.base_dir, LAYOUT_MARKER)
if not os.path.exists(layout_marker_path):
with open(layout_marker_path, 'w') as layout_marker:
layout_marker.write(self.layout_name)
else:
layout = open(os.path.join(self.base_dir, LAYOUT_MARKER), 'rb'
).read().strip()
with open(layout_marker_path, 'r') as layout_marker:
layout = open(layout_marker_path, 'r').read().strip()
if layout != self.layout_name:
raise ValueError(
"Directory layout `%s` selected for blob directory %s, but "
......@@ -517,7 +517,7 @@ def auto_layout_select(path):
# use.
layout_marker = os.path.join(path, LAYOUT_MARKER)
if os.path.exists(layout_marker):
layout = open(layout_marker, 'rb').read()
layout = open(layout_marker, 'r').read()
layout = layout.strip()
log('Blob directory `%s` has layout marker set. '
'Selected `%s` layout. ' % (path, layout), level=logging.DEBUG)
......@@ -559,7 +559,8 @@ class BushyLayout(object):
# Create the bushy directory structure with the least significant byte
# first
for byte in oid.decode():
directories.append('0x%s' % binascii.hexlify(byte.encode()))
directories.append(
'0x%s' % binascii.hexlify(byte.encode()).decode())
return os.path.sep.join(directories)
def path_to_oid(self, path):
......@@ -568,7 +569,7 @@ class BushyLayout(object):
path = path.split(os.path.sep)
# Each path segment stores a byte in hex representation. Turn it into
# an int and then get the character for our byte string.
oid = ''.join(binascii.unhexlify(byte[2:]) for byte in path)
oid = b''.join(binascii.unhexlify(byte[2:]) for byte in path)
return oid
def getBlobFilePath(self, oid, tid):
......@@ -599,7 +600,7 @@ class LawnLayout(BushyLayout):
# OID z64.
raise TypeError()
return utils.repr_to_oid(path)
except TypeError:
except (TypeError, binascii.Error):
raise ValueError('Not a valid OID path: `%s`' % path)
LAYOUTS['lawn'] = LawnLayout()
......
......@@ -215,7 +215,7 @@ def scan(f, pos):
s = 0
while 1:
l = data.find(".", s)
l = data.find(b".", s)
if l < 0:
pos += len(data)
break
......
......@@ -64,7 +64,7 @@ historical state.
>>> conn.root()['first']['count']
1
>>> historical_conn.root().keys()
>>> sorted(historical_conn.root().keys())
['first']
>>> historical_conn.root()['first']['count']
0
......@@ -96,7 +96,7 @@ commit.
>>> historical_conn = db.open(transaction_manager=transaction1,
... at=historical_serial)
>>> historical_conn.root().keys()
>>> sorted(historical_conn.root().keys())
['first']
>>> historical_conn.root()['first']['count']
0
......@@ -110,7 +110,7 @@ root.
>>> serial = conn.root()._p_serial
>>> historical_conn = db.open(
... transaction_manager=transaction1, before=serial)
>>> historical_conn.root().keys()
>>> sorted(historical_conn.root().keys())
['first']
>>> historical_conn.root()['first']['count']
0
......@@ -120,7 +120,7 @@ underlying mechanism is a storage's loadBefore method. When you look at a
connection's ``before`` attribute, it is normalized into a ``before`` serial,
no matter what you pass into ``db.open``.
>>> print conn.before
>>> print(conn.before)
None
>>> historical_conn.before == serial
True
......
......@@ -26,12 +26,13 @@ Let's look at an example:
... return self.name, self.kind
>>> import ZODB.persistentclass
>>> class C:
... __metaclass__ = ZODB.persistentclass.PersistentMetaClass
... __init__ = __init__
... __module__ = '__zodb__'
... foo = foo
... kind = 'sample'
>>> C = ZODB.persistentclass.PersistentMetaClass(
... 'C', (object, ), dict(
... __init__ = __init__,
... __module__ = '__zodb__',
... foo = foo,
... kind = 'sample',
... ))
This example is obviously a bit contrived. In particular, we defined
the methods outside of the class. Why? Because all of the items in a
......@@ -65,8 +66,9 @@ We can create and use instances of the class:
We can modify the class and none of the persistent attributes will
change because the object hasn't been saved.
>>> import six
>>> def bar(self):
... print 'bar', self.name
... six.print_('bar', self.name)
>>> C.bar = bar
>>> c.bar()
bar first
......@@ -102,7 +104,7 @@ values:
Now, if we modify the class:
>>> def baz(self):
... print 'baz', self.name
... six.print_('baz', self.name)
>>> C.baz = baz
>>> c.baz()
baz first
......
......@@ -165,7 +165,7 @@ else:
return super(_Unpickler, self).find_class(modulename, name)
return self.find_global(modulename, name)
_oidtypes = str, type(None)
_oidtypes = bytes, type(None)
# Py3: Python 3 uses protocol 3 by default, which is not loadable by Python
# 2. If we want this, we can add a condition here for Python 3.
......@@ -697,9 +697,15 @@ def get_refs(a_pickle):
refs = []
u = pickle.Unpickler(BytesIO(a_pickle))
if sys.version_info[0] < 3:
u.persistent_load = refs
u.noload()
u.noload()
else:
# Py3: There is no `noload()` in Python 3.
u.persistent_load = refs.append
u.load()
u.load()
# Now we have a list of referencs. Need to convert to list of
# oids and class info:
......
......@@ -15,7 +15,6 @@
import functools
from persistent import Persistent
@functools.total_ordering
class MinPO(Persistent):
def __init__(self, value=None):
self.value = value
......@@ -23,12 +22,18 @@ class MinPO(Persistent):
def __cmp__(self, aMinPO):
return cmp(self.value, aMinPO.value)
# Py3: Python 3 does not support cmp() anymore.
# Py3: Python 3 does not support cmp() anymore. This is insane!!
def __eq__(self, aMinPO):
return self.value == aMinPO.value
def __ne__(self, aMinPO):
return self.value != aMinPO.value
def __gt__(self, aMinPO):
return self.value > aMinPO.value
def __lt__(self, aMinPO):
return self.value <= aMinPO.value
return self.value < aMinPO.value
def __repr__(self):
return "MinPO(%s)" % self.value
......@@ -34,7 +34,7 @@ We can open a new blob file for reading, but it won't have any data::
But we can write data to a new Blob by opening it for writing::
>>> f = myblob.open("w")
>>> f.write("Hi, Blob!")
>>> _ = f.write(b"Hi, Blob!")
If we try to open a Blob again while it is open for writing, we get an error::
......@@ -77,7 +77,7 @@ Before we can write, we have to close the readers::
Now we can open it for writing again and e.g. append data::
>>> f4 = myblob.open("a")
>>> f4.write("\nBlob is fine.")
>>> _ = f4.write(b"\nBlob is fine.")
We can't open a blob while it is open for writing:
......@@ -122,7 +122,7 @@ We can read lines out of the blob too::
We can seek to certain positions in a blob and read portions of it::
>>> f6 = myblob.open('r')
>>> f6.seek(4)
>>> _ = f6.seek(4)
>>> int(f6.tell())
4
>>> f6.read(5)
......@@ -133,7 +133,7 @@ We can use the object returned by a blob open call as an iterable::
>>> f7 = myblob.open('r')
>>> for line in f7:
... print line
... print(line.decode())
Hi, Blob!
<BLANKLINE>
Blob is fine.
......@@ -142,7 +142,7 @@ We can use the object returned by a blob open call as an iterable::
We can truncate a blob::
>>> f8 = myblob.open('a')
>>> f8.truncate(0)
>>> _ = f8.truncate(0)
>>> f8.close()
>>> f8 = myblob.open('r')
>>> f8.read()
......@@ -159,12 +159,13 @@ Blobs are always opened in binary mode::
Blobs that have not been committed can be opened using any mode,
except for "c"::
>>> import six
>>> from ZODB.blob import BlobError, valid_modes
>>> for mode in valid_modes:
... try:
... f10 = Blob().open(mode)
... except BlobError:
... print 'open failed with mode "%s"' % mode
... six.print_('open failed with mode "%s"' % mode)
... else:
... f10.close()
open failed with mode "c"
......@@ -192,6 +193,6 @@ Passing data to the blob constructor
If you have a small amount of data, you can pass it to the blob
constructor. (This is a convenience, mostly for writing tests.)
>>> myblob = Blob('some data')
>>> myblob = Blob(b'some data')
>>> myblob.open().read()
'some data'
......@@ -7,7 +7,7 @@ an O(1) operation we call `consume`::
Let's create a file::
>>> to_import = open('to_import', 'wb')
>>> to_import.write("I'm a Blob and I feel fine.")
>>> _ = to_import.write(b"I'm a Blob and I feel fine.")
The file *must* be closed before giving it to consumeFile:
......@@ -32,13 +32,14 @@ We now can call open on the blob and read and write the data::
"I'm a Blob and I feel fine."
>>> blob_read.close()
>>> blob_write = blob.open('w')
>>> blob_write.write('I was changed.')
>>> _ = blob_write.write(b'I was changed.')
>>> blob_write.close()
We can not consume a file when there is a reader or writer around for a blob
already::
>>> open('to_import', 'wb').write('I am another blob.')
>>> with open('to_import', 'wb') as file:
... _ = file.write(b'I am another blob.')
>>> blob_read = blob.open('r')
>>> blob.consumeFile('to_import')
Traceback (most recent call last):
......@@ -68,12 +69,12 @@ fails. We simulate this in different states:
Case 1: We don't have uncommitted data, but the link operation fails. We fall
back to try a copy/remove operation that is successfull::
>>> open('to_import', 'wb').write('Some data.')
>>> with open('to_import', 'wb') as file:
... _ = file.write(b'Some data.')
>>> def failing_rename(f1, f2):
... import exceptions
... if f1 == 'to_import':
... raise exceptions.OSError("I can't link.")
... raise OSError("I can't link.")
... os_rename(f1, f2)
>>> blob = Blob()
......@@ -95,11 +96,11 @@ exist::
>>> utils_cp = ZODB.utils.cp
>>> def failing_copy(f1, f2):
... import exceptions
... raise exceptions.OSError("I can't copy.")
... raise OSError("I can't copy.")
>>> ZODB.utils.cp = failing_copy
>>> open('to_import', 'wb').write('Some data.')
>>> with open('to_import', 'wb') as file:
... _ = file.write(b'Some data.')
>>> blob.consumeFile('to_import')
Traceback (most recent call last):
OSError: I can't copy.
......@@ -115,7 +116,7 @@ previous uncomitted data::
>>> blob = Blob()
>>> blob_writing = blob.open('w')
>>> blob_writing.write('Uncommitted data')
>>> _ = blob_writing.write(b'Uncommitted data')
>>> blob_writing.close()
>>> blob.consumeFile('to_import')
......
......@@ -24,9 +24,9 @@ entries per directory level:
>>> from ZODB.blob import BushyLayout
>>> bushy = BushyLayout()
>>> bushy.oid_to_path('\x00\x00\x00\x00\x00\x00\x00\x00')
>>> bushy.oid_to_path(b'\x00\x00\x00\x00\x00\x00\x00\x00')
'0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x00'
>>> bushy.oid_to_path('\x00\x00\x00\x00\x00\x00\x00\x01')
>>> bushy.oid_to_path(b'\x00\x00\x00\x00\x00\x00\x00\x01')
'0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x01'
>>> import os
......@@ -54,9 +54,9 @@ of blobs at the same time (e.g. 32k on ext3).
>>> from ZODB.blob import LawnLayout
>>> lawn = LawnLayout()
>>> lawn.oid_to_path('\x00\x00\x00\x00\x00\x00\x00\x00')
>>> lawn.oid_to_path(b'\x00\x00\x00\x00\x00\x00\x00\x00')
'0x00'
>>> lawn.oid_to_path('\x00\x00\x00\x00\x00\x00\x00\x01')
>>> lawn.oid_to_path(b'\x00\x00\x00\x00\x00\x00\x00\x01')
'0x01'
>>> lawn.path_to_oid('0x01')
......@@ -98,10 +98,12 @@ already been used to create a lawn structure.
>>> from ZODB.blob import LAYOUT_MARKER
>>> import os.path
>>> open(os.path.join('blobs', LAYOUT_MARKER), 'wb').write('bushy')
>>> with open(os.path.join('blobs', LAYOUT_MARKER), 'wb') as file:
... _ = file.write(b'bushy')
>>> auto_layout_select('blobs')
'bushy'
>>> open(os.path.join('blobs', LAYOUT_MARKER), 'wb').write('lawn')
>>> with open(os.path.join('blobs', LAYOUT_MARKER), 'wb') as file:
... _ = file.write(b'lawn')
>>> auto_layout_select('blobs')
'lawn'
>>> shutil.rmtree('blobs')
......@@ -111,7 +113,8 @@ not hidden, we assume that it was created with an earlier version of
the blob implementation and uses our `lawn` layout:
>>> os.mkdir('blobs')
>>> open(os.path.join('blobs', '0x0101'), 'wb').write('foo')
>>> with open(os.path.join('blobs', '0x0101'), 'wb') as file:
... _ = file.write(b'foo')
>>> auto_layout_select('blobs')
'lawn'
>>> shutil.rmtree('blobs')
......@@ -119,7 +122,8 @@ the blob implementation and uses our `lawn` layout:
5. If the directory contains only hidden files, use the bushy layout:
>>> os.mkdir('blobs')
>>> open(os.path.join('blobs', '.svn'), 'wb').write('blah')
>>> with open(os.path.join('blobs', '.svn'), 'wb') as file:
... _ = file.write(b'blah')
>>> auto_layout_select('blobs')
'bushy'
>>> shutil.rmtree('blobs')
......@@ -162,7 +166,8 @@ the marker will be used in the future:
>>> base_storage = ZODB.FileStorage.FileStorage(datafs)
>>> os.mkdir(blobs)
>>> open(os.path.join(blobs, 'foo'), 'wb').write('foo')
>>> with open(os.path.join(blobs, 'foo'), 'wb') as file:
... _ = file.write(b'foo')
>>> blob_storage = BlobStorage(blobs, base_storage)
>>> blob_storage.fshelper.layout_name
'lawn'
......@@ -202,12 +207,18 @@ Create a `lawn` directory structure and migrate it to the new `bushy` one:
>>> blob1 = old_fsh.getPathForOID(7039, create=True)
>>> blob2 = old_fsh.getPathForOID(10, create=True)
>>> blob3 = old_fsh.getPathForOID(7034, create=True)
>>> open(os.path.join(blob1, 'foo'), 'wb').write('foo')
>>> open(os.path.join(blob1, 'foo2'), 'wb').write('bar')
>>> open(os.path.join(blob2, 'foo3'), 'wb').write('baz')
>>> open(os.path.join(blob2, 'foo4'), 'wb').write('qux')
>>> open(os.path.join(blob3, 'foo5'), 'wb').write('quux')
>>> open(os.path.join(blob3, 'foo6'), 'wb').write('corge')
>>> with open(os.path.join(blob1, 'foo'), 'wb') as file:
... _ = file.write(b'foo')
>>> with open(os.path.join(blob1, 'foo2'), 'wb') as file:
... _ = file.write(b'bar')
>>> with open(os.path.join(blob2, 'foo3'), 'wb') as file:
... _ = file.write(b'baz')
>>> with open(os.path.join(blob2, 'foo4'), 'wb') as file:
... _ = file.write(b'qux')
>>> with open(os.path.join(blob3, 'foo5'), 'wb') as file:
... _ = file.write(b'quux')
>>> with open(os.path.join(blob3, 'foo6'), 'wb') as file:
... _ = file.write(b'corge')
Committed blobs have their permissions set to 000
......@@ -237,16 +248,17 @@ with the same sizes and permissions:
>>> len(lawn_files) == len(bushy_files)
True
>>> import six
>>> for file_name, lawn_path in sorted(lawn_files.items()):
... if file_name == '.layout':
... continue
... lawn_stat = os.stat(lawn_path)
... bushy_path = bushy_files[file_name]
... bushy_stat = os.stat(bushy_path)
... print lawn_path, '-->', bushy_path
... six.print_(lawn_path, '-->', bushy_path)
... if ((lawn_stat.st_mode, lawn_stat.st_size) !=
... (bushy_stat.st_mode, bushy_stat.st_size)):
... print 'oops'
... print('oops')
old/0x1b7f/foo --> bushy/0x00/0x00/0x00/0x00/0x00/0x00/0x1b/0x7f/foo
old/0x1b7f/foo2 --> bushy/0x00/0x00/0x00/0x00/0x00/0x00/0x1b/0x7f/foo2
old/0x0a/foo3 --> bushy/0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x0a/foo3
......@@ -277,10 +289,10 @@ True
... lawn_stat = os.stat(lawn_path)
... bushy_path = bushy_files[file_name]
... bushy_stat = os.stat(bushy_path)
... print bushy_path, '-->', lawn_path
... six.print_(bushy_path, '-->', lawn_path)
... if ((lawn_stat.st_mode, lawn_stat.st_size) !=
... (bushy_stat.st_mode, bushy_stat.st_size)):
... print 'oops'
... print('oops')
bushy/0x00/0x00/0x00/0x00/0x00/0x00/0x1b/0x7f/foo --> lawn/0x1b7f/foo
bushy/0x00/0x00/0x00/0x00/0x00/0x00/0x1b/0x7f/foo2 --> lawn/0x1b7f/foo2
bushy/0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x0a/foo3 --> lawn/0x0a/foo3
......
......@@ -57,32 +57,37 @@ Put some revisions of a blob object in our database and on the filesystem:
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> blob = Blob()
>>> blob.open('w').write('this is blob data 0')
>>> with blob.open('w') as file:
... _ = file.write(b'this is blob data 0')
>>> root['blob'] = blob
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 1')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 1')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 2')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 2')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 3')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 3')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 4')
>>> with root['blob'].open('w') as file:
... _ = file.write(b'this is blob data 4')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
......@@ -142,9 +147,11 @@ is reset:
We can also see, that the flag is set during the pack, by leveraging the
knowledge that the underlying storage's pack method is also called:
>>> import six
>>> def dummy_pack(time, ref):
... print "_blobs_pack_is_in_progress =",
... print blob_storage._blobs_pack_is_in_progress
... six.print_(
... "_blobs_pack_is_in_progress =",
... blob_storage._blobs_pack_is_in_progress)
... return base_pack(time, ref)
>>> base_pack = base_storage.pack
>>> base_storage.pack = dummy_pack
......
......@@ -30,7 +30,8 @@ Open one more, and we get a warning:
>>> len(handler.records)
1
>>> msg = handler.records[0]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> import six
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 8 open connections with a pool_size of 7
Open 6 more, and we get 6 more warnings:
......@@ -41,7 +42,7 @@ Open 6 more, and we get 6 more warnings:
>>> len(handler.records)
7
>>> msg = handler.records[-1]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 14 open connections with a pool_size of 7
Add another, so that it's more than twice the default, and the level
......@@ -53,7 +54,7 @@ rises to critical:
>>> len(handler.records)
8
>>> msg = handler.records[-1]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB CRITICAL DB.open() has 15 open connections with a pool_size of 7
While it's boring, it's important to verify that the same relationships
......@@ -74,7 +75,7 @@ A warning for opening one more:
>>> len(handler.records)
1
>>> msg = handler.records[0]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 3 open connections with a pool_size of 2
More warnings through 4 connections:
......@@ -85,7 +86,7 @@ More warnings through 4 connections:
>>> len(handler.records)
2
>>> msg = handler.records[-1]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 4 open connections with a pool_size of 2
And critical for going beyond that:
......@@ -96,7 +97,7 @@ And critical for going beyond that:
>>> len(handler.records)
3
>>> msg = handler.records[-1]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB CRITICAL DB.open() has 5 open connections with a pool_size of 2
We can change the pool size on the fly:
......@@ -110,7 +111,7 @@ We can change the pool size on the fly:
>>> len(handler.records)
1
>>> msg = handler.records[0]
>>> print msg.name, msg.levelname, msg.getMessage()
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 7 open connections with a pool_size of 6
Enough of that.
......
......@@ -63,7 +63,7 @@ entries:
True
>>> len(db2.databases)
2
>>> names = dbmap.keys(); names.sort(); print names
>>> names = sorted(dbmap.keys()); print(names)
['notroot', 'root']
It's an error to try to insert a database with a name already in use:
......@@ -112,7 +112,7 @@ Now there are two connections in that collection:
True
>>> len(cn2.connections)
2
>>> names = cn.connections.keys(); names.sort(); print names
>>> names = sorted(cn.connections.keys()); print(names)
['notroot', 'root']
So long as this database group remains open, the same ``Connection`` objects
......@@ -155,9 +155,9 @@ ZODB 3.6:
... </zodb>
... """
>>> db = databaseFromString(config)
>>> print db.database_name
>>> print(db.database_name)
this_is_the_name
>>> db.databases.keys()
>>> sorted(db.databases.keys())
['this_is_the_name']
However, the ``.databases`` attribute cannot be configured from file. It
......@@ -166,7 +166,12 @@ to test that here; this is ugly:
>>> from ZODB.config import getDbSchema
>>> import ZConfig
>>> from cStringIO import StringIO
>>> try:
... from cStringIO import StringIO
... except ImportError:
... # Py3
... from io import StringIO
Derive a new `config2` string from the `config` string, specifying a
different database_name:
......@@ -182,12 +187,11 @@ Now get a `ZConfig` factory from `config2`:
The desired ``databases`` mapping can be passed to this factory:
>>> db2 = factory[0].open(databases=db.databases)
>>> print db2.database_name # has the right name
>>> print(db2.database_name) # has the right name
another_name
>>> db.databases is db2.databases # shares .databases with `db`
True
>>> all = db2.databases.keys()
>>> all.sort()
>>> all = sorted(db2.databases.keys())
>>> all # and db.database_name & db2.database_name are the keys
['another_name', 'this_is_the_name']
......
......@@ -23,7 +23,7 @@ if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
from zope.testing.doctest import DocTestSuite
else:
from doctest import DocTestSuite
from ZODB.tests.util import DB
from ZODB.tests.util import DB, checker
def test_integration():
r"""Test the integration of broken object support with the database:
......@@ -92,8 +92,8 @@ def test_integration():
def test_suite():
return unittest.TestSuite((
DocTestSuite('ZODB.broken'),
DocTestSuite(),
DocTestSuite('ZODB.broken', checker=checker),
DocTestSuite(checker=checker),
))
if __name__ == '__main__': unittest.main()
......@@ -33,6 +33,8 @@ import ZODB
import ZODB.MappingStorage
import ZODB.tests.util
PY2 = sys.version_info[0] == 2
class CacheTestBase(ZODB.tests.util.TestCase):
def setUp(self):
......@@ -96,8 +98,8 @@ class DBMethods(CacheTestBase):
def checkCacheDetail(self):
for name, count in self.db.cacheDetail():
self.assert_(isinstance(name, bytes))
self.assert_(isinstance(count, int))
self.assertEqual(isinstance(name, str), True)
self.assertEqual(isinstance(count, int), True)
def checkCacheExtremeDetail(self):
expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']
......@@ -435,17 +437,19 @@ The cache is empty initially:
We force the root to be loaded and the cache grows:
Py3: XXX: This needs more investigation in Connection.
>>> getattr(conn.root, 'z', None)
>>> conn._cache.total_estimated_size
64
>>> conn._cache.total_estimated_size == (64 if PY2 else 128)
True
We add some data and the cache grows:
>>> conn.root.z = ZODB.tests.util.P('x'*100)
>>> import transaction
>>> transaction.commit()
>>> conn._cache.total_estimated_size
320
>>> conn._cache.total_estimated_size == (320 if PY2 else 320+64)
True
Loading the objects in another connection gets the same sizes:
......@@ -453,11 +457,11 @@ Loading the objects in another connection gets the same sizes:
>>> conn2._cache.total_estimated_size
0
>>> getattr(conn2.root, 'x', None)
>>> conn2._cache.total_estimated_size
128
>>> conn._cache.total_estimated_size == (64 if PY2 else 128)
True
>>> _ = conn2.root.z.name
>>> conn2._cache.total_estimated_size
320
>>> conn._cache.total_estimated_size == (320 if PY2 else 320+64)
True
If we deactivate, the size goes down:
......
......@@ -186,7 +186,9 @@ def multi_atabases():
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown))
setUp=ZODB.tests.util.setUp,
tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker))
suite.addTest(unittest.makeSuite(ZODBConfigTest))
return suite
......
......@@ -15,12 +15,23 @@
from ZODB.tests.MinPO import MinPO
import doctest
import os
import re
import sys
import time
import transaction
import unittest
import ZODB
import ZODB.tests.util
from zope.testing import renormalizing
checker = renormalizing.RENormalizing([
# Python 3 bytes add a "b".
(re.compile("b('.*?')"),
r"\1"),
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.ReadConflictError"), r"ReadConflictError"),
])
# Return total number of connections across all pools in a db._pools.
def nconn(pools):
......@@ -196,7 +207,7 @@ def open_convenience():
DB arguments.
>>> conn = ZODB.connection('data.fs', blob_dir='blobs')
>>> conn.root()['b'] = ZODB.blob.Blob('test')
>>> conn.root()['b'] = ZODB.blob.Blob(b'test')
>>> transaction.commit()
>>> conn.close()
......@@ -348,5 +359,6 @@ def test_suite():
s = unittest.makeSuite(DBTests)
s.addTest(doctest.DocTestSuite(
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
checker=checker
))
return s
......@@ -38,11 +38,6 @@ import ZODB.tests.util
import ZODB.utils
from zope.testing import renormalizing
checker = renormalizing.RENormalizing([
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.POSKeyError"), r"POSKeyError"),
])
class DemoStorageTests(
StorageTestBase.StorageTestBase,
......@@ -251,11 +246,14 @@ def load_before_base_storage_current():
def test_suite():
suite = unittest.TestSuite((
doctest.DocTestSuite(
setUp=setUp, tearDown=ZODB.tests.util.tearDown, checker=checker
setUp=setUp, tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker
),
doctest.DocFileSuite(
'../DemoStorage.test',
setUp=setUp, tearDown=ZODB.tests.util.tearDown,
setUp=setUp,
tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker,
),
))
suite.addTest(unittest.makeSuite(DemoStorageTests, 'check'))
......
......@@ -160,13 +160,13 @@ class MVCCMappingStorageTests(
# Add a fake transaction
transactions = self._storage._transactions
self.assertEqual(1, len(transactions))
fake_timestamp = 'zzzzzzzy' # the year 5735 ;-)
fake_timestamp = b'zzzzzzzy' # the year 5735 ;-)
transactions[fake_timestamp] = transactions.values()[0]
# Verify the next transaction comes after the fake transaction
t = transaction.Transaction()
self._storage.tpc_begin(t)
self.assertEqual(self._storage._tid, 'zzzzzzzz')
self.assertEqual(self._storage._tid, b'zzzzzzzz')
def create_blob_storage(name, blob_dir):
s = MVCCMappingStorage(name)
......
......@@ -13,10 +13,12 @@
##############################################################################
"""Test the list interface to PersistentList
"""
import sys
import unittest
from persistent.list import PersistentList
PY2 = sys.version_info[0] == 2
l0 = []
l1 = [0]
l2 = [0, 1]
......@@ -54,6 +56,8 @@ class TestPList(unittest.TestCase):
# Test __cmp__ and __len__
# Py3: No cmp() or __cmp__ anymore.
if PY2:
def mycmp(a, b):
r = cmp(a, b)
if r < 0: return -1
......
......@@ -40,6 +40,7 @@ except ImportError:
# Py3
import io as cStringIO
PY2 = sys.version_info[0] == 2
# This pickle contains a persistent mapping pickle created from the
# old code.
......@@ -124,31 +125,26 @@ class PMTests(unittest.TestCase):
self.assertEqual(m.get('fred'), None)
self.assertEqual(m.get('fred', 42), 42)
keys = m.keys()
keys.sort()
keys = sorted(m.keys())
self.assertEqual(keys, ['a', 'b', 'name', 'x'])
values = m.values()
values.sort()
self.assertEqual(values, [1, 2, 3, 'bob'])
values = set(m.values())
self.assertEqual(values, set([1, 2, 3, 'bob']))
items = m.items()
items.sort()
items = sorted(m.items())
self.assertEqual(items,
[('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)])
keys = list(m.iterkeys())
keys.sort()
if PY2:
keys = sorted(m.iterkeys())
self.assertEqual(keys, ['a', 'b', 'name', 'x'])
values = list(m.itervalues())
values.sort()
values = sorted(m.itervalues())
self.assertEqual(values, [1, 2, 3, 'bob'])
items = list(m.iteritems())
items.sort()
self.assertEqual(items,
[('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)])
items = sorted(m.iteritems())
self.assertEqual(
items, [('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)])
# PersistentMapping didn't have an __iter__ method before ZODB 3.4.2.
# Check that it plays well now with the Python iteration protocol.
......
......@@ -36,7 +36,7 @@ def test_weakrefs_functional():
>>> ref() is ob
True
The hash of the ref if the same as the hash of the referenced object:
The hash of the ref is the same as the hash of the referenced object:
>>> hash(ref) == hash(ob)
True
......
......@@ -73,7 +73,7 @@ class RecoverTest(ZODB.tests.util.TestCase):
offset = random.randint(0, self.storage._pos - size)
f = open(self.path, "a+b")
f.seek(offset)
f.write("\0" * size)
f.write(b"\0" * size)
f.close()
ITERATIONS = 5
......@@ -106,11 +106,11 @@ class RecoverTest(ZODB.tests.util.TestCase):
self.assert_('\n0 bytes removed during recovery' in output, output)
# Verify that the recovered database is identical to the original.
before = file(self.path, 'rb')
before = open(self.path, 'rb')
before_guts = before.read()
before.close()
after = file(self.dest, 'rb')
after = open(self.dest, 'rb')
after_guts = after.read()
after.close()
......@@ -164,10 +164,10 @@ class RecoverTest(ZODB.tests.util.TestCase):
# Overwrite the entire header.
f = open(self.path, "a+b")
f.seek(pos1 - 50)
f.write("\0" * 100)
f.write(b"\0" * 100)
f.close()
output = self.recover()
self.assert_('error' in output, output)
self.assertTrue('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
os.remove(self.path)
......@@ -176,10 +176,10 @@ class RecoverTest(ZODB.tests.util.TestCase):
# Overwrite part of the header.
f = open(self.path, "a+b")
f.seek(pos2 + 10)
f.write("\0" * 100)
f.write(b"\0" * 100)
f.close()
output = self.recover()
self.assert_('error' in output, output)
self.assertTrue('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
......@@ -197,9 +197,9 @@ class RecoverTest(ZODB.tests.util.TestCase):
f = open(self.path, "r+b")
f.seek(pos + 16)
current_status = f.read(1)
self.assertEqual(current_status, ' ')
self.assertEqual(current_status, b' ')
f.seek(pos + 16)
f.write('c')
f.write(b'c')
f.close()
# Try to recover. The original bug was that this never completed --
......
......@@ -15,6 +15,7 @@ import doctest
import sys
import unittest
import ZODB.tests.util
from ZODB import serialize
try:
......@@ -131,5 +132,6 @@ class SerializerTestCase(unittest.TestCase):
def test_suite():
suite = unittest.makeSuite(SerializerTestCase)
suite.addTest(doctest.DocTestSuite("ZODB.serialize"))
suite.addTest(
doctest.DocTestSuite("ZODB.serialize", checker=ZODB.tests.util.checker))
return suite
......@@ -53,12 +53,12 @@ class TestUtils(unittest.TestCase):
self.assertEquals(num, n2, "u64() failed")
def checkKnownConstants(self):
self.assertEquals("\000\000\000\000\000\000\000\001", p64(1))
self.assertEquals("\000\000\000\001\000\000\000\000", p64(1<<32))
self.assertEquals(u64("\000\000\000\000\000\000\000\001"), 1)
self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1<<32)
self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1<<32)
self.assertEquals(b"\000\000\000\000\000\000\000\001", p64(1))
self.assertEquals(b"\000\000\000\001\000\000\000\000", p64(1<<32))
self.assertEquals(u64(b"\000\000\000\000\000\000\000\001"), 1)
self.assertEquals(U64(b"\000\000\000\000\000\000\000\001"), 1)
self.assertEquals(u64(b"\000\000\000\001\000\000\000\000"), 1<<32)
self.assertEquals(U64(b"\000\000\000\001\000\000\000\000"), 1<<32)
def checkPersistentIdHandlesDescriptor(self):
from ZODB.serialize import ObjectWriter
......@@ -88,11 +88,11 @@ class TestUtils(unittest.TestCase):
# The pickle contains a GLOBAL ('c') opcode resolving to MinPO's
# module and class.
self.assert_('cZODB.tests.MinPO\nMinPO\n' in data)
self.assert_(b'cZODB.tests.MinPO\nMinPO\n' in data)
# Fiddle the pickle so it points to something "impossible" instead.
data = data.replace('cZODB.tests.MinPO\nMinPO\n',
'cpath.that.does.not.exist\nlikewise.the.class\n')
data = data.replace(b'cZODB.tests.MinPO\nMinPO\n',
b'cpath.that.does.not.exist\nlikewise.the.class\n')
# Pickle can't resolve that GLOBAL opcode -- gets ImportError.
self.assertRaises(ImportError, pickle.loads, data)
......@@ -101,8 +101,8 @@ class TestUtils(unittest.TestCase):
raise ConflictError(object=obj, data=data)
except ConflictError as detail:
# And verify that the msg names the impossible path.
self.assert_('path.that.does.not.exist.likewise.the.class' in
str(detail))
self.assertTrue(
'path.that.does.not.exist.likewise.the.class' in str(detail))
else:
self.fail("expected ConflictError, but no exception raised")
......
......@@ -69,8 +69,10 @@ Clean up.
import doctest
import zope.testing.setupstack
import ZODB.tests.util
def test_suite():
return doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown)
tearDown=zope.testing.setupstack.tearDown,
checker=ZODB.tests.util.checker)
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from doctest import DocTestSuite
import unittest
def test_new_ghost_w_persistent_class():
    """
    Persistent meta classes work with PickleCache.new_ghost:

    Build the class by calling the metaclass directly: a
    ``__metaclass__`` class attribute is silently ignored on Python 3,
    which would leave ``PC`` a plain class without ``_p_`` attributes.

    >>> import ZODB.persistentclass
    >>> PC = ZODB.persistentclass.PersistentMetaClass(
    ...     'PC', (object,), {})

    >>> PC._p_oid
    >>> PC._p_jar
    >>> PC._p_serial
    >>> PC._p_changed
    False

    >>> import persistent
    >>> jar = object()
    >>> cache = persistent.PickleCache(jar, 10, 100)
    >>> cache.new_ghost('1', PC)

    >>> PC._p_oid
    '1'
    >>> PC._p_jar is jar
    True
    >>> PC._p_serial
    >>> PC._p_changed
    False
    """
def test_suite():
    """Assemble this module's doctests into a unittest suite."""
    suite = unittest.TestSuite()
    suite.addTest(DocTestSuite())
    return suite
......@@ -42,11 +42,10 @@ import zope.testing.renormalizing
try:
from StringIO import StringIO
from StringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import StringIO
from io import BytesIO
def new_time():
"""Create a _new_ time stamp.
......@@ -118,7 +117,7 @@ class BlobCloneTests(ZODB.tests.util.TestCase):
root['blob'] = Blob()
transaction.commit()
stream = StringIO()
stream = BytesIO()
p = Pickler(stream, 1)
p.dump(root['blob'])
u = Unpickler(stream)
......@@ -756,13 +755,15 @@ def test_suite():
setUp=setUp,
tearDown=zope.testing.setupstack.tearDown,
optionflags=doctest.ELLIPSIS,
checker=ZODB.tests.util.checker,
))
suite.addTest(doctest.DocFileSuite(
"blob_layout.txt",
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
setUp=setUp,
tearDown=zope.testing.setupstack.tearDown,
checker = zope.testing.renormalizing.RENormalizing([
checker = ZODB.tests.util.checker + \
zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\S+/((old|bushy|lawn)/\S+/foo[23456]?)'), r'\1'),
......
......@@ -113,7 +113,6 @@ Of course, none of this applies if content doesn't support conflict resolution.
class Resolveable(persistent.Persistent):
def _p_resolveConflict(self, old, committed, new):
resolved = {}
for k in old:
if k not in committed:
......@@ -296,13 +295,14 @@ And load the pickle:
def test_suite():
return unittest.TestSuite([
manuel.testing.TestSuite(
manuel.doctest.Manuel()
manuel.doctest.Manuel(checker=ZODB.tests.util.checker)
+ manuel.footnote.Manuel()
+ manuel.capture.Manuel(),
'../ConflictResolution.txt',
setUp=setUp, tearDown=tearDown,
setUp=setUp, tearDown=tearDown
),
doctest.DocTestSuite(
setUp=setUp, tearDown=tearDown),
setUp=setUp, tearDown=tearDown,
checker=ZODB.tests.util.checker),
])
......@@ -14,6 +14,7 @@
import doctest
import persistent
import unittest
import ZODB.tests.util
class MyClass(persistent.Persistent):
pass
......@@ -181,15 +182,19 @@ def tearDownDbs(test):
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite('../cross-database-references.txt',
doctest.DocFileSuite(
'../cross-database-references.txt',
globs=dict(MyClass=MyClass),
tearDown=tearDownDbs,
checker=ZODB.tests.util.checker,
),
doctest.DocFileSuite('../cross-database-references.txt',
doctest.DocFileSuite(
'../cross-database-references.txt',
globs=dict(MyClass=MyClass_w_getnewargs),
tearDown=tearDownDbs,
checker=ZODB.tests.util.checker,
),
doctest.DocTestSuite(),
doctest.DocTestSuite(checker=ZODB.tests.util.checker),
))
if __name__ == '__main__':
......
......@@ -20,6 +20,11 @@ from ZODB.utils import p64, z64
from ZODB.tests.util import setUp, tearDown
import six
try:
xrange
except NameError:
# Py3: No xrange.
xrange = range
class Test(unittest.TestCase):
......@@ -143,7 +148,7 @@ class Test(unittest.TestCase):
# Now build up a tree with random values, and check maxKey at each
# step.
correct_max = "" # smaller than anything we'll add
correct_max = b"" # smaller than anything we'll add
for i in range(1000):
key = p64(random.randrange(100000000))
index[key] = i
......@@ -152,10 +157,10 @@ class Test(unittest.TestCase):
self.assertEqual(index_max, correct_max)
index.clear()
a = '\000\000\000\000\000\001\000\000'
b = '\000\000\000\000\000\002\000\000'
c = '\000\000\000\000\000\003\000\000'
d = '\000\000\000\000\000\004\000\000'
a = b'\000\000\000\000\000\001\000\000'
b = b'\000\000\000\000\000\002\000\000'
c = b'\000\000\000\000\000\003\000\000'
d = b'\000\000\000\000\000\004\000\000'
index[a] = 1
index[c] = 2
self.assertEqual(index.maxKey(b), a)
......@@ -171,7 +176,7 @@ class Test(unittest.TestCase):
# Now build up a tree with random values, and check minKey at each
# step.
correct_min = "\xff" * 8 # bigger than anything we'll add
correct_min = b"\xff" * 8 # bigger than anything we'll add
for i in range(1000):
key = p64(random.randrange(100000000))
index[key] = i
......@@ -180,10 +185,10 @@ class Test(unittest.TestCase):
self.assertEqual(index_min, correct_min)
index.clear()
a = '\000\000\000\000\000\001\000\000'
b = '\000\000\000\000\000\002\000\000'
c = '\000\000\000\000\000\003\000\000'
d = '\000\000\000\000\000\004\000\000'
a = b'\000\000\000\000\000\001\000\000'
b = b'\000\000\000\000\000\002\000\000'
c = b'\000\000\000\000\000\003\000\000'
d = b'\000\000\000\000\000\004\000\000'
index[a] = 1
index[c] = 2
self.assertEqual(index.minKey(b), c)
......
......@@ -168,6 +168,7 @@ Clean up.
"""
import doctest
import ZODB.tests.util
def test_suite():
return doctest.DocTestSuite()
return doctest.DocTestSuite(checker=ZODB.tests.util.checker)
......@@ -18,7 +18,8 @@ import ZODB.tests.util
def test_suite():
return manuel.testing.TestSuite(
manuel.doctest.Manuel() + manuel.footnote.Manuel(),
manuel.doctest.Manuel(checker=ZODB.tests.util.checker) +
manuel.footnote.Manuel(),
'../historical_connections.txt',
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
)
......@@ -22,8 +22,7 @@ def class_with_circular_ref_to_self():
"""
It should be possible for a class to refer to itself.
>>> class C:
... __metaclass__ = ZODB.persistentclass.PersistentMetaClass
>>> C = ZODB.persistentclass.PersistentMetaClass('C', (object,), {})
>>> C.me = C
>>> db = ZODB.tests.util.DB()
......@@ -39,6 +38,34 @@ It should be possible for a class to reger to itself.
"""
def test_new_ghost_w_persistent_class():
"""
Persistent meta classes work with PickleCache.new_ghost:
>>> import ZODB.persistentclass
>>> PC = ZODB.persistentclass.PersistentMetaClass('PC', (object,), {})
>>> PC._p_oid
>>> PC._p_jar
>>> PC._p_serial
>>> PC._p_changed
False
>>> import persistent
>>> jar = object()
>>> cache = persistent.PickleCache(jar, 10, 100)
>>> cache.new_ghost('1', PC)
>>> PC._p_oid
'1'
>>> PC._p_jar is jar
True
>>> PC._p_serial
>>> PC._p_changed
False
"""
# XXX need to update files to get newer testing package
class FakeModule:
def __init__(self, name, dict):
......@@ -59,8 +86,10 @@ def tearDown(test):
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite("../persistentclass.txt",
setUp=setUp, tearDown=tearDown),
doctest.DocFileSuite(
"../persistentclass.txt",
setUp=setUp, tearDown=tearDown,
checker=ZODB.tests.util.checker),
doctest.DocTestSuite(setUp=setUp, tearDown=tearDown),
))
......
......@@ -18,6 +18,7 @@ from ZODB.MappingStorage import DB
import atexit
import os
import persistent
import re
import sys
import tempfile
import time
......@@ -26,6 +27,36 @@ import unittest
import warnings
import ZODB.utils
import zope.testing.setupstack
from zope.testing import renormalizing
checker = renormalizing.RENormalizing([
(re.compile("<(.*?) object at 0x[0-9a-f]*?>"),
r"<\1 object at 0x000000000000>"),
# Python 3 bytes add a "b".
(re.compile("b('.*?')"),
r"\1"),
(re.compile('b(".*?")'),
r"\1"),
# Python 3 adds module name to exceptions.
(re.compile("ZODB.interfaces.BlobError"),
r"BlobError"),
(re.compile("ZODB.blob.BlobStorageError"),
r"BlobStorageError"),
(re.compile("ZODB.broken.BrokenModified"),
r"BrokenModified"),
(re.compile("ZODB.POSException.POSKeyError"),
r"POSKeyError"),
(re.compile("ZODB.POSException.ConflictError"),
r"ConflictError"),
(re.compile("ZODB.POSException.ReadConflictError"),
r"ReadConflictError"),
(re.compile("ZODB.POSException.InvalidObjectReference"),
r"InvalidObjectReference"),
(re.compile("ZODB.POSException.ReadOnlyHistoryError"),
r"ReadOnlyHistoryError"),
(re.compile("ZConfig.ConfigurationSyntaxError"),
r"ConfigurationSyntaxError"),
])
def setUp(test, name='test'):
transaction.abort()
......@@ -125,9 +156,9 @@ def wait(func=None, timeout=30):
raise AssertionError
def store(storage, oid, value='x', serial=ZODB.utils.z64):
if not isinstance(oid, str):
if not isinstance(oid, bytes):
oid = ZODB.utils.p64(oid)
if not isinstance(serial, str):
if not isinstance(serial, bytes):
serial = ZODB.utils.p64(serial)
t = transaction.get()
storage.tpc_begin(t)
......
......@@ -27,10 +27,10 @@ except ImportError:
import pickle
try:
from cStringIO import StringIO
from cStringIO import StringIO as BytesIO
except ImportError:
# Py3
from io import StringIO
from io import BytesIO
from persistent.TimeStamp import TimeStamp
......@@ -177,7 +177,7 @@ def repr_to_oid(repr):
if repr.startswith("0x"):
repr = repr[2:]
as_bin = unhexlify(repr)
as_bin = "\x00"*(8-len(as_bin)) + as_bin
as_bin = b"\x00"*(8-len(as_bin)) + as_bin
return as_bin
serial_repr = oid_repr
......@@ -219,12 +219,14 @@ def positive_id(obj):
# for what serialize.py calls formats 5 and 6.
def get_pickle_metadata(data):
# Returns a 2-tuple of strings.
# ZODB's data records contain two pickles. The first is the class
# of the object, the second is the object. We're only trying to
# pick apart the first here, to extract the module and class names.
if data.startswith('(c'): # pickle MARK GLOBAL opcode sequence
if data.startswith(b'(c'): # pickle MARK GLOBAL opcode sequence
global_prefix = 2
elif data.startswith('c'): # pickle GLOBAL opcode
elif data.startswith(b'c'): # pickle GLOBAL opcode
global_prefix = 1
else:
global_prefix = 0
......@@ -235,12 +237,12 @@ def get_pickle_metadata(data):
# load the class. Just break open the pickle and get the
# module and class from it. The module and class names are given by
# newline-terminated strings following the GLOBAL opcode.
modname, classname, rest = data.split('\n', 2)
modname, classname, rest = data.split(b'\n', 2)
modname = modname[global_prefix:] # strip GLOBAL opcode
return modname, classname
return modname.decode(), classname.decode()
# Else there are a bunch of other possible formats.
f = StringIO(data)
f = BytesIO(data)
u = pickle.Unpickler(f)
try:
class_info = u.load()
......
[tox]
envlist = py26,py27,py33
[testenv]
commands =
python setup.py test -q
# without explicit deps, setup.py test will download a bunch of eggs into $PWD
deps =
BTrees
ZConfig
manuel
persistent
six
transaction
zc.lockfile
zdaemon
zope.interface
zope.testing
[testenv:coverage]
basepython =
python2.7
commands =
# The installed version messes up nose's test discovery / coverage reporting
# So, we uninstall that from the environment, and then install the editable
# version, before running nosetests.
pip uninstall -y ZODB
pip install -e .
nosetests --with-xunit --with-xcoverage
deps =
nose
coverage
nosexcover
BTrees
ZConfig
manuel
persistent
six
transaction
zc.lockfile
zdaemon
zope.interface
zope.testing
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment