Commit de1ed537 authored by Stephan Richter

Addressed all of Marius' comments from the previous checkin.

parent e7d8ca72
......@@ -16,7 +16,7 @@
The base class here is tightly coupled with its subclasses and
its use is not recommended. It's still here for historical reasons.
"""
from __future__ import print_function, with_statement
from __future__ import print_function
import threading
import time
......
......@@ -274,7 +274,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
if sys.version_info[0] < 3:
pickler.inst_persistent_id = persistent_id
else:
pickler.inst_persistent_id = persistent_id
pickler.persistent_id = persistent_id
pickler.dump(meta)
pickler.dump(resolved)
return self._crs_transform_record_data(file.getvalue(1))
......
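For context on the hunk above: Python 2's cPickle exposes an instance-only hook, inst_persistent_id, while Python 3 picklers only expose persistent_id, hence the branch. A minimal sketch of that pattern, assuming a plain stdlib Pickler and a hypothetical persistent_id callable (the real code sits inside tryToResolveConflict):

import sys
from io import BytesIO

try:
    import cPickle as pickle   # Python 2
except ImportError:
    import pickle              # Python 3

def persistent_id(obj):
    # Hypothetical hook: return a stable id for persistent objects,
    # or None to pickle the object inline.
    return getattr(obj, '_p_oid', None)

def make_pickler(file):
    pickler = pickle.Pickler(file, 1)
    if sys.version_info[0] < 3:
        # cPickle's hook that is consulted only for instance objects.
        pickler.inst_persistent_id = persistent_id
    else:
        # Python 3 picklers only know persistent_id.
        pickler.persistent_id = persistent_id
    return pickler

pickler = make_pickler(BytesIO())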
......@@ -48,8 +48,8 @@ class ExportImport:
f.write(b'ZEXP')
oids = [oid]
done_oids = {}
done=done_oids.__contains__
load=self._storage.load
done = done_oids.__contains__
load = self._storage.load
supports_blobs = IBlobStorage.providedBy(self._storage)
while oids:
oid = oids.pop(0)
......
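The done = done_oids.__contains__ line above is a small idiom: binding the container's __contains__ once gives a cheap "already exported?" predicate inside the loop. A sketch of the surrounding traversal shape, with a hypothetical load_refs callable standing in for loading a record and extracting its references:

from collections import deque

def walk_oids(root_oid, load_refs):
    oids = deque([root_oid])
    done_oids = set()               # the real code uses a dict
    done = done_oids.__contains__   # bound method, reflects later adds
    while oids:
        oid = oids.popleft()
        if done(oid):
            continue
        done_oids.add(oid)
        yield oid                   # the real loop writes the record here
        oids.extend(load_refs(oid))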
......@@ -13,7 +13,7 @@
##############################################################################
"""Storage implementation using a log written to a single file.
"""
from __future__ import print_function, with_statement
from __future__ import print_function
from persistent.TimeStamp import TimeStamp
from struct import pack, unpack
......@@ -129,7 +129,7 @@ class FileStorage(
raise ValueError("time-travel only supported in read-only mode")
if stop is None:
stop=b'\377'*8
stop = b'\377'*8
# Lock the database and set up the temp file.
if not read_only:
......@@ -1062,7 +1062,7 @@ class FileStorage(
raise POSException.ReadOnlyError()
stop = TimeStamp(*time.gmtime(t)[:5]+(t%60,)).raw()
if stop==z64:
if stop == z64:
raise FileStorageError('Invalid pack time')
# If the storage is empty, there's nothing to do.
......
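The FileStorage hunks above touch two tid values: b'\377' * 8 is the maximum transaction id (used when no stop point is given), and the pack code turns a wall-clock time into a tid via TimeStamp. A minimal sketch of that conversion, assuming only the persistent package:

import time
from persistent.TimeStamp import TimeStamp

z64 = b'\0' * 8        # all-zero tid; an invalid pack time
MAX_TID = b'\377' * 8  # upper bound meaning "no limit"

def pack_tid(t):
    # gmtime()[:5] is (year, month, day, hour, minute); seconds are
    # passed separately so the fractional part of t survives.
    return TimeStamp(*time.gmtime(t)[:5] + (t % 60,)).raw()

stop = pack_tid(time.time())
assert len(stop) == 8 and stop != z64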
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
......@@ -17,7 +11,7 @@ from __future__ import print_function
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from __future__ import print_function
import ZODB.FileStorage
from ZODB.utils import get_pickle_metadata, p64, oid_repr, tid_repr
from ZODB.serialize import get_refs
......
......@@ -34,18 +34,22 @@ checker = renormalizing.RENormalizing([
def pack_keep_old():
"""Should a copy of the database be kept?
The pack_keep_old constructor argument controls whether a .old file (and .old directory for blobs is kept.)
The pack_keep_old constructor argument controls whether a .old file (and .old
directory for blobs) is kept.
>>> fs = ZODB.FileStorage.FileStorage('data.fs', blob_dir='blobs')
>>> db = ZODB.DB(fs)
>>> conn = db.open()
>>> import ZODB.blob
>>> conn.root()[1] = ZODB.blob.Blob()
>>> _ = conn.root()[1].open('w').write(b'some data')
>>> with conn.root()[1].open('w') as file:
... _ = file.write(b'some data')
>>> conn.root()[2] = ZODB.blob.Blob()
>>> _ = conn.root()[2].open('w').write(b'some data')
>>> with conn.root()[2].open('w') as file:
... _ = file.write(b'some data')
>>> transaction.commit()
>>> _ = conn.root()[1].open('w').write(b'some other data')
>>> with conn.root()[1].open('w') as file:
... _ = file.write(b'some other data')
>>> del conn.root()[2]
>>> transaction.commit()
>>> old_size = os.stat('data.fs').st_size
......@@ -77,11 +81,14 @@ The pack_keep_old constructor argument controls whether a .old file (and .old di
>>> db = ZODB.DB(fs)
>>> conn = db.open()
>>> conn.root()[1] = ZODB.blob.Blob()
>>> _ = conn.root()[1].open('w').write(b'some data')
>>> with conn.root()[1].open('w') as file:
... file.write(b'some data')
>>> conn.root()[2] = ZODB.blob.Blob()
>>> _ = conn.root()[2].open('w').write(b'some data')
>>> with conn.root()[2].open('w') as file:
... _ = file.write(b'some data')
>>> transaction.commit()
>>> _ = conn.root()[1].open('w').write(b'some other data')
>>> with conn.root()[1].open('w') as file:
... _ = file.write(b'some other data')
>>> del conn.root()[2]
>>> transaction.commit()
......@@ -117,7 +124,8 @@ def pack_with_repeated_blob_records():
>>> trans = tm.begin()
>>> fs.tpc_begin(trans)
>>> _ = open('ablob', 'w').write('some data')
>>> with open('ablob', 'w') as file:
... _ = file.write('some data')
>>> _ = fs.store(oid, oldserial, blob_record, '', trans)
>>> _ = fs.storeBlob(oid, oldserial, blob_record, 'ablob', '', trans)
>>> fs.tpc_vote(trans)
......@@ -182,11 +190,9 @@ def test_suite():
doctest.DocFileSuite(
'zconfig.txt', 'iterator.test',
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
checker=checker
),
checker=checker),
doctest.DocTestSuite(
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
checker=checker
),
checker=checker),
))
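The doctest edits above swap one-shot blob.open('w').write(...) calls for with blocks, which closes the file deterministically and keeps the output stable: on Python 3, write() returns the number of bytes written, so the result is bound to _ to keep it out of the expected doctest output. A tiny illustration outside ZODB, using a plain temporary file:

import tempfile

with tempfile.TemporaryFile() as f:
    # On Python 3 this returns 9; assigning to _ keeps an interactive
    # or doctest session from echoing it.
    _ = f.write(b'some data')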
......@@ -81,7 +81,8 @@ class Blob(persistent.Persistent):
raise TypeError('Blobs do not support subclassing.')
self.__setstate__()
if data is not None:
self.open('w').write(data)
with self.open('w') as file:
file.write(data)
def __setstate__(self, state=None):
# we use lists here because it will allow us to add and remove
......
......@@ -38,9 +38,6 @@
# high-order bytes when saving. On loading data, we add the leading
# bytes back before using u64 to convert the data back to (long)
# integers.
from __future__ import with_statement
import struct
from BTrees._fsBTree import fsBucket
......@@ -189,7 +186,7 @@ class fsIndex(object):
iterkeys = __iter__
def keys(self):
return list(six.iterkeys(self))
return list(self.iterkeys())
def iteritems(self):
for prefix, tree in six.iteritems(self._data):
......@@ -205,7 +202,7 @@ class fsIndex(object):
yield str2num(value)
def values(self):
return list(six.itervalues(self))
return list(self.itervalues())
# Comment below applies for the following minKey and maxKey methods
#
......
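The keys()/values() changes above drop six in favour of the class's own iterators. The shape is the common one where the list-returning methods are thin wrappers over the iterator methods, so only the iterators know the storage layout. A generic sketch (not the real fsIndex):

class TinyIndex(object):
    def __init__(self, data=None):
        self._data = dict(data or {})

    def __iter__(self):
        return iter(self._data)

    iterkeys = __iter__

    def itervalues(self):
        for key in self._data:
            yield self._data[key]

    def keys(self):
        return list(self.iterkeys())

    def values(self):
        return list(self.itervalues())

assert sorted(TinyIndex({'a': 1}).keys()) == ['a']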
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
#!/usr/bin/env python2.4
#!/usr/bin/env python
# Based on a transaction analyzer by Matt Kromer.
from __future__ import print_function
import pickle
import sys
from ZODB.FileStorage import FileStorage
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
"""Check the consistency of BTrees in a Data.fs
usage: checkbtrees.py data.fs
......@@ -8,16 +7,6 @@ Try to find all the BTrees in a Data.fs, call their _check() methods,
and run them through BTrees.check.check().
"""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import ZODB
from ZODB.FileStorage import FileStorage
from BTrees.check import check
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
......@@ -63,10 +62,6 @@ revisions of objects; therefore fsrefs cannot find problems in versions or
in non-current revisions.
"""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import traceback
from ZODB.FileStorage import FileStorage
......@@ -110,7 +105,7 @@ def main(path=None):
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
......
#!/usr/bin/env python2.3
#!/usr/bin/env python2
"""Print details statistics from fsdump output."""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import re
import sys
import six
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
......@@ -15,8 +14,6 @@
##############################################################################
"""Tool to dump the last few transactions from a FileStorage."""
from __future__ import print_function
from __future__ import print_function
from ZODB.fstools import prev_txn
import binascii
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
......@@ -13,7 +12,6 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Simple consistency checker for FileStorage.
usage: fstest.py [-v] data.fs
......@@ -33,7 +31,6 @@ possible for the damage to occur only in the part of the file that
stores object pickles. Those errors will go undetected.
"""
from __future__ import print_function
from __future__ import print_function
# The implementation is based closely on the read_index() function in
# ZODB.FileStorage. If anything about the FileStorage layout changes,
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2001, 2002, 2003 Zope Foundation and Contributors.
......@@ -13,7 +12,6 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""A script to gather statistics while doing a storage migration.
This is very similar to a standard storage's copyTransactionsFrom() method,
......@@ -75,25 +73,6 @@ Positional arguments:
pairs. E.g. "name=full;frequency=3600"
"""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import re
import sys
import time
......
......@@ -14,8 +14,6 @@
"""A script to migrate a blob directory into a different layout.
"""
from __future__ import print_function
from __future__ import print_function
import logging
import optparse
import os
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
"""Report on the net size of objects counting subobjects.
usage: netspace.py [-P | -v] data.fs
......@@ -8,11 +7,6 @@ usage: netspace.py [-P | -v] data.fs
-v: print info for all objects, even if a traversal path isn't found
"""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import ZODB
from ZODB.FileStorage import FileStorage
from ZODB.utils import U64, get_pickle_metadata
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
"""Report on the space used by objects in a storage.
usage: space.py data.fs
......@@ -9,12 +8,6 @@ The current implementation only supports FileStorage.
Current limitations / simplifications: Ignores revisions and versions.
"""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from ZODB.FileStorage import FileStorage
from ZODB.utils import U64, get_pickle_metadata
import six
......
#!/usr/bin/env python2.3
#!/usr/bin/env python
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
......@@ -112,14 +111,8 @@ Usage: loadmail2 [options]
-mbox 'foo.mbox 300 -100'
are equivalent
$Id$
"""
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
from __future__ import print_function
import mailbox
import math
import os
......@@ -129,8 +122,6 @@ import sys
import threading
import time
import transaction
from six.moves import filter
import six
class JobProducer:
......@@ -189,7 +180,7 @@ class MBox:
self.number = start
while min:
six.advance_iterator(mb)
next(mb)
min -= 1
self._lock = threading.Lock()
......@@ -201,7 +192,7 @@ class MBox:
try:
if self._max > 0 and self.number >= self._max:
raise IndexError(self.number + 1)
message = six.advance_iterator(self._mbox)
message = next(self._mbox)
message.body = message.fp.read()
message.headers = list(message.headers)
self.number += 1
......@@ -233,7 +224,7 @@ def VmSize():
except:
return 0
else:
l = filter(lambda l: l[:7] == 'VmSize:', f.readlines())
l = list(filter(lambda l: l[:7] == 'VmSize:', f.readlines()))
if l:
l = l[0][7:].strip().split()[0]
return int(l)
......@@ -328,7 +319,7 @@ def run1(tid, db, factory, job, args):
def run(jobs, tid=b''):
import Zope2
while 1:
factory, job, args, repeatp = six.advance_iterator(jobs)
factory, job, args, repeatp = next(jobs)
run1(tid, Zope2.DB, factory, job, args)
if repeatp:
while 1:
......@@ -392,7 +383,7 @@ class IndexJob:
self.mbox, self.number, self.max = mbox, int(number), int(max)
def create(self):
messages = [six.advance_iterator(self.mbox) for i in range(self.number)]
messages = [next(self.mbox) for i in range(self.number)]
return index, (messages, self.catalog, self.max)
......
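The loadmail changes above rely on two Python 3 facts: the builtin next() replaces six.advance_iterator(), and filter() returns a lazy iterator, so its result must be wrapped in list() before being truth-tested or indexed (as in the VmSize parsing). A small sketch with made-up /proc-style input:

lines = ['VmRSS:     567 kB', 'VmSize:   1234 kB']

it = iter(lines)
first = next(it)          # next() works on Python 2.6+ and 3 alike

# Without list(), `if l:` would always be true on Python 3, because a
# filter object is truthy even when it yields nothing.
l = list(filter(lambda l: l[:7] == 'VmSize:', lines))
if l:
    vmsize_kb = int(l[0][7:].strip().split()[0])   # 1234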
......@@ -449,7 +449,7 @@ class ObjectWriter:
return self._dump(meta, obj.__getstate__())
def _dump(self, classmeta, state):
# To reuse the existing cBytesIO object, we must reset
# To reuse the existing BytesIO object, we must reset
# the file position to 0 and truncate the file after the
# new pickle is written.
self._file.seek(0)
......@@ -659,6 +659,7 @@ def referencesf(p, oids=None):
u.noload()
u.noload()
else:
# Py3: There is no `noload()` in Python 3.
u.persistent_load = refs.append
u.load()
u.load()
......
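The comment fix above (cBytesIO to BytesIO) refers to the buffer-reuse trick in _dump: the same BytesIO is rewound, written, and then truncated so a shorter pickle never keeps the tail of the previous one. A minimal sketch of that reuse with hypothetical payloads:

from io import BytesIO

buf = BytesIO()

def dump_record(payload):
    buf.seek(0)
    buf.write(payload)
    # Truncate after writing, as the comment says, so old trailing
    # bytes are discarded.
    buf.truncate()
    return buf.getvalue()

dump_record(b'first, longer record')
assert dump_record(b'short') == b'short'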
......@@ -18,9 +18,6 @@ http://www.zope.org/Documentation/Developer/Models/ZODB/ZODB_Architecture_Storag
All storages should be able to pass these tests.
"""
from __future__ import with_statement
from ZODB import POSException
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
......@@ -31,8 +28,6 @@ import time
import transaction
import zope.interface
import zope.interface.verify
from six.moves import map
from six.moves import zip
ZERO = b'\0'*8
......
......@@ -144,7 +144,7 @@ class IteratorStorage(IteratorCompare):
def checkIterateRecordsRepeatedly(self):
self._dostore()
tinfo = six.advance_iterator(self._storage.iterator())
tinfo = next(self._storage.iterator())
self.assertEquals(1, len(list(tinfo)))
self.assertEquals(1, len(list(tinfo)))
......@@ -152,7 +152,7 @@ class IteratorStorage(IteratorCompare):
self._dostore()
iterator = self._storage.iterator()
# We have one transaction with 1 modified object.
txn_1 = six.advance_iterator(iterator)
txn_1 = next(iterator)
self.assertEquals(1, len(list(txn_1)))
# We store another transaction with 1 object, the already running
......@@ -187,14 +187,14 @@ class ExtendedIteratorStorage(IteratorCompare):
txniter = self._storage.iterator(revid2, revid3)
self.iter_verify(txniter, [revid2, revid3], 12)
# Specify an upper bound somewhere in between values
revid3a = p64(int((U64(revid3) + U64(revid4)) / 2))
revid3a = p64((U64(revid3) + U64(revid4)) // 2)
txniter = self._storage.iterator(revid2, revid3a)
self.iter_verify(txniter, [revid2, revid3], 12)
# Specify a lower bound somewhere in between values.
# revid2 == revid1+1 is very likely on Windows. Adding 1 before
# dividing ensures that "the midpoint" we compute is strictly larger
# than revid1.
revid1a = p64(int((U64(revid1) + 1 + U64(revid2)) / 2))
revid1a = p64((U64(revid1) + 1 + U64(revid2)) // 2)
assert revid1 < revid1a
txniter = self._storage.iterator(revid1a, revid3a)
self.iter_verify(txniter, [revid2, revid3], 12)
......
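The switch from int((a + b) / 2) to (a + b) // 2 above matters for 64-bit tids: on Python 3, / between ints produces a float, and values beyond 2**53 lose their low bits on the round trip, so the computed "midpoint" could land outside the intended range. Floor division stays exact. A self-contained sketch:

a = 0x03A8C0FFEE123456
b = a + 2

mid_float = int((a + b) / 2)   # detours through a 53-bit float mantissa
mid_exact = (a + b) // 2       # exact integer arithmetic

assert mid_exact == a + 1
# mid_float may differ from mid_exact once the values exceed 2**53.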
......@@ -15,10 +15,6 @@ from ZODB.POSException import ConflictError
SHORT_DELAY = 0.01
def sort(l):
"Sort a list in place and return it."
return sorted(l)
class TestThread(threading.Thread):
"""Base class for defining threads that run from unittest.
......@@ -68,7 +64,7 @@ class ZODBClientThread(TestThread):
else:
for i in range(self.commits):
self.commit(d, i)
self.test.assertEqual(sort(d.keys()), list(range(self.commits)))
self.test.assertEqual(sorted(d.keys()), list(range(self.commits)))
def commit(self, d, num):
d[num] = time.time()
......
......@@ -12,9 +12,10 @@
#
##############################################################################
"""A minimal persistent object to use for tests"""
import functools
from persistent import Persistent
@functools.total_ordering
class MinPO(Persistent):
def __init__(self, value=None):
self.value = value
......@@ -26,5 +27,8 @@ class MinPO(Persistent):
def __eq__(self, aMinPO):
return self.value == aMinPO.value
def __lt__(self, aMinPO):
return self.value <= aMinPO.value
def __repr__(self):
return "MinPO(%s)" % self.value
......@@ -15,7 +15,6 @@
Any storage that supports undo() must pass these tests.
"""
import time
from persistent import Persistent
......@@ -29,9 +28,6 @@ from ZODB import DB
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
from six.moves import map
import six
from six.moves import zip
ZERO = '\0'*8
......@@ -680,7 +676,7 @@ class TransactionalUndoStorage:
eq = self.assertEqual
for i in range(BATCHES):
txn = six.advance_iterator(transactions)
txn = next(transactions)
tid = p64(i + 1)
eq(txn.tid, tid)
......@@ -692,11 +688,11 @@ class TransactionalUndoStorage:
eq(L1, L2)
for i in range(BATCHES * OBJECTS):
txn = six.advance_iterator(transactions)
txn = next(transactions)
eq(len([rec for rec in txn if rec.data_txn is None]), 1)
for i in range(BATCHES):
txn = six.advance_iterator(transactions)
txn = next(transactions)
# The undos are performed in reverse order.
otid = p64(BATCHES - i)
......
......@@ -12,7 +12,7 @@
#
##############################################################################
"""Unit tests for the Connection class."""
from __future__ import print_function, with_statement
from __future__ import print_function
import doctest
import re
......@@ -676,7 +676,7 @@ implementation of checkCurrentSerialInTransaction.
>>> bad = set()
>>> def checkCurrentSerialInTransaction(oid, serial, trans):
... six.print_('checkCurrentSerialInTransaction', repr(oid))
... if not trans == transaction.get(): print('oops')
... if trans != transaction.get(): print('oops')
... if oid in bad:
... raise ReadConflictError(oid=oid)
......
......@@ -644,7 +644,8 @@ def pack_with_open_blob_files():
>>> import ZODB.blob
>>> conn1.root()[1] = ZODB.blob.Blob()
>>> conn1.add(conn1.root()[1])
>>> conn1.root()[1].open('w').write('some data')
>>> with conn1.root()[1].open('w') as file:
... file.write('some data')
>>> tm1.commit()
>>> tm2 = transaction.TransactionManager()
......@@ -652,7 +653,8 @@ def pack_with_open_blob_files():
>>> f = conn1.root()[1].open()
>>> conn1.root()[2] = ZODB.blob.Blob()
>>> conn1.add(conn1.root()[2])
>>> conn1.root()[2].open('w').write('some more data')
>>> with conn1.root()[2].open('w') as file:
... file.write('some more data')
>>> db.pack()
>>> f.read()
......
......@@ -27,7 +27,6 @@ from transaction import Transaction
import ZODB
from ZODB.MappingStorage import MappingStorage
import sys
import six
try:
import cPickle
......@@ -138,15 +137,15 @@ class PMTests(unittest.TestCase):
self.assertEqual(items,
[('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)])
keys = list(six.iterkeys(m))
keys = list(m.iterkeys())
keys.sort()
self.assertEqual(keys, ['a', 'b', 'name', 'x'])
values = list(six.itervalues(m))
values = list(m.itervalues())
values.sort()
self.assertEqual(values, [1, 2, 3, 'bob'])
items = list(six.iteritems(m))
items = list(m.iteritems())
items.sort()
self.assertEqual(items,
[('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)])
......@@ -169,7 +168,7 @@ class PMTests(unittest.TestCase):
keylist = []
while 1:
try:
key = six.advance_iterator(i)
key = next(i)
except StopIteration:
break
keylist.append(key)
......
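The testPersistentMapping changes above call the mapping's own iterkeys/itervalues/iteritems and use the builtin next(); the explicit StopIteration loop is only there because the test exercises the iterator protocol by hand. A plain-dict sketch of the same loop:

m = {'a': 2, 'b': 3, 'name': 'bob', 'x': 1}

i = iter(m)
keylist = []
while 1:
    try:
        key = next(i)       # builtin next() replaces six.advance_iterator
    except StopIteration:
        break
    keylist.append(key)

# Outside a protocol test this is simply: keylist = list(m)
assert sorted(keylist) == ['a', 'b', 'name', 'x']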
......@@ -22,7 +22,7 @@ from zope.testing import renormalizing
from ZODB.utils import U64, p64, u64
try:
long(1)
long
except NameError:
# Py3
long = int
......
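The try/except above is the usual guard that aliases long to int on Python 3; referencing the bare name is enough to raise NameError there, without building a throwaway long the way long(1) did. A sketch of the guard on its own:

try:
    long                 # plain name lookup; NameError on Python 3
except NameError:
    # Py3
    long = int

assert long(7) == 7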
......@@ -11,7 +11,6 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from persistent import Persistent
from persistent.mapping import PersistentMapping
from ZODB.POSException import ReadConflictError
......@@ -24,8 +23,6 @@ import ZODB
import ZODB.FileStorage
import ZODB.MappingStorage
import ZODB.tests.util
from six.moves import map
from six.moves import zip
class P(Persistent):
pass
......@@ -108,8 +105,8 @@ class ZODBTests(ZODB.tests.util.TestCase):
l1.sort()
l2 = list(ob2.items())
l2.sort()
l1 = map(lambda k_v: (k_v[0], k_v[1][0]), l1)
l2 = map(lambda k_v1: (k_v1[0], k_v1[1][0]), l2)
l1 = list(map(lambda k_v: (k_v[0], k_v[1][0]), l1))
l2 = list(map(lambda k_v1: (k_v1[0], k_v1[1][0]), l2))
self.assertEqual(l1, l2)
self.assert_(ob._p_oid != ob2._p_oid)
self.assertEqual(ob._p_jar, ob2._p_jar)
......
......@@ -18,8 +18,6 @@ storage to use for unit tests. MappingStorage isn't sufficient.
Since even a minimal storage has some complexity, we run standard
storage tests against the test storage.
"""
from __future__ import with_statement
import bisect
import unittest
......
......@@ -163,13 +163,15 @@ class BlobUndoTests(BlobTestBase):
root = connection.root()
transaction.begin()
blob = Blob()
blob.open('w').write('this is state 1')
with blob.open('w') as file:
file.write('this is state 1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
blob.open('w').write('this is state 2')
with blob.open('w') as file:
file.write('this is state 2')
transaction.commit()
......@@ -184,7 +186,8 @@ class BlobUndoTests(BlobTestBase):
connection = database.open()
root = connection.root()
transaction.begin()
open('consume1', 'w').write('this is state 1')
with open('consume1', 'w') as file:
file.write('this is state 1')
blob = Blob()
blob.consumeFile('consume1')
root['blob'] = blob
......@@ -192,7 +195,8 @@ class BlobUndoTests(BlobTestBase):
transaction.begin()
blob = root['blob']
open('consume2', 'w').write('this is state 2')
with open('consume2', 'w') as file:
file.write('this is state 2')
blob.consumeFile('consume2')
transaction.commit()
......@@ -210,13 +214,15 @@ class BlobUndoTests(BlobTestBase):
blob = Blob()
transaction.begin()
blob.open('w').write('this is state 1')
with blob.open('w') as file:
file.write('this is state 1')
root['blob'] = blob
transaction.commit()
transaction.begin()
blob = root['blob']
blob.open('w').write('this is state 2')
with blob.open('w') as file:
file.write('this is state 2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
......@@ -238,7 +244,8 @@ class BlobUndoTests(BlobTestBase):
blob = Blob()
transaction.begin()
blob.open('w').write('this is state 1')
with blob.open('w') as file:
file.write('this is state 1')
root['blob'] = blob
transaction.commit()
......@@ -276,17 +283,20 @@ class RecoveryBlobStorage(BlobTestBase,
conn.root()[1] = ZODB.blob.Blob()
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
conn.root()[2].open('w').write('some data')
with conn.root()[2].open('w') as file:
file.write('some data')
transaction.commit()
conn.root()[3] = ZODB.blob.Blob()
conn.root()[3].open('w').write(
(''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
for i in range(random.randint(10000,20000)))
)[:-random.randint(1,4)]
)
with conn.root()[3].open('w') as file:
file.write(
(''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
for i in range(random.randint(10000,20000)))
)[:-random.randint(1,4)]
)
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
conn.root()[2].open('w').write('some other data')
with conn.root()[2].open('w') as file:
file.write('some other data')
transaction.commit()
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
......@@ -295,7 +305,8 @@ class RecoveryBlobStorage(BlobTestBase,
def gc_blob_removes_uncommitted_data():
"""
>>> blob = Blob()
>>> blob.open('w').write('x')
>>> with blob.open('w') as file:
... file.write('x')
>>> fname = blob._p_blob_uncommitted
>>> os.path.exists(fname)
True
......@@ -330,7 +341,8 @@ def commit_from_wrong_partition():
>>> root = connection.root()
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> root['blob'].open('w').write('test')
>>> with root['blob'].open('w') as file:
... file.write('test')
>>> transaction.commit() # doctest: +ELLIPSIS
Copied blob file ...
......@@ -340,7 +352,8 @@ def commit_from_wrong_partition():
Works with savepoints too:
>>> root['blob2'] = Blob()
>>> root['blob2'].open('w').write('test2')
>>> with root['blob2'].open('w') as file:
... file.write('test2')
>>> _ = transaction.savepoint() # doctest: +ELLIPSIS
Copied blob file ...
......@@ -379,7 +392,8 @@ def packing_with_uncommitted_data_non_undoing():
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> root['blob'].open('w').write('test')
>>> with root['blob'].open('w') as file:
... file.write('test')
>>> blob_storage.pack(new_time(), referencesf)
......@@ -406,7 +420,8 @@ def packing_with_uncommitted_data_undoing():
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> root['blob'].open('w').write('test')
>>> with root['blob'].open('w') as file:
... file.write('test')
>>> blob_storage.pack(new_time(), referencesf)
......@@ -478,7 +493,8 @@ def loadblob_tmpstore():
>>> from ZODB.blob import Blob
>>> root['blob'] = Blob()
>>> connection.add(root['blob'])
>>> root['blob'].open('w').write('test')
>>> with root['blob'].open('w') as file:
... file.write('test')
>>> import transaction
>>> transaction.commit()
>>> blob_oid = root['blob']._p_oid
......@@ -540,7 +556,8 @@ def do_not_depend_on_cwd():
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> conn.root()['blob'].open('w').write('data')
>>> with conn.root()['blob'].open('w') as file:
... file.write('data')
>>> transaction.commit()
>>> os.chdir(here)
>>> conn.root()['blob'].open().read()
......@@ -557,11 +574,13 @@ def savepoint_isolation():
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob('initial')
>>> transaction.commit()
>>> conn.root.b.open('w').write('1')
>>> with conn.root.b.open('w') as file:
... file.write('1')
>>> _ = transaction.savepoint()
>>> tm = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm)
>>> conn2.root.b.open('w').write('2')
>>> with conn2.root.b.open('w') as file:
... file.write('2')
>>> _ = tm.savepoint()
>>> conn.root.b.open().read()
'1'
......@@ -585,12 +604,14 @@ def savepoint_commits_without_invalidations_out_of_order():
>>> conn1 = db.open(transaction_manager=tm1)
>>> conn1.root.b = ZODB.blob.Blob('initial')
>>> tm1.commit()
>>> conn1.root.b.open('w').write('1')
>>> with conn1.root.b.open('w') as file:
... file.write('1')
>>> _ = tm1.savepoint()
>>> tm2 = transaction.TransactionManager()
>>> conn2 = db.open(transaction_manager=tm2)
>>> conn2.root.b.open('w').write('2')
>>> with conn2.root.b.open('w') as file:
... file.write('2')
>>> _ = tm1.savepoint()
>>> conn1.root.b.open().read()
'1'
......@@ -624,7 +645,8 @@ def savepoint_cleanup():
[]
>>> conn.root.b = ZODB.blob.Blob('initial')
>>> transaction.commit()
>>> conn.root.b.open('w').write('1')
>>> with conn.root.b.open('w') as file:
... file.write('1')
>>> _ = transaction.savepoint()
>>> transaction.abort()
>>> os.listdir(tdir)
......
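Several testblob hunks above share one shape: write a scratch file inside a with block, then hand the path to the blob via consumeFile, which takes ownership of the file instead of copying through an open handle. A condensed sketch of that shape, assuming an already-open connection's root() and the default transaction manager:

import transaction
from ZODB.blob import Blob

def store_blob_from_file(root, key, path, payload):
    with open(path, 'w') as f:        # scratch file is closed before handover
        f.write(payload)
    blob = Blob()
    blob.consumeFile(path)            # the file is moved into the blob
    root[key] = blob
    transaction.commit()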
......@@ -13,9 +13,6 @@
##############################################################################
"""Conventience function for creating test databases
"""
from __future__ import with_statement
from ZODB.MappingStorage import DB
import atexit
......
......@@ -78,8 +78,6 @@ def deprecated38(msg):
if sys.version_info[0] < 3:
bytes = str
def as_bytes(obj):
"Convert obj into bytes"
return str(obj)
......@@ -93,10 +91,6 @@ if sys.version_info[0] < 3:
byte_chr = chr
else:
import builtins
bytes = builtins.bytes
def as_bytes(obj):
if isinstance(obj, bytes):
# invoking str on a bytes object gives its repr()
......
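The utils hunk above trims the version-dependent as_bytes helper; the Python 3 branch has to special-case bytes because str() on a bytes object yields its repr ("b'...'"). A sketch of such a helper; the encoding in the Python 3 branch is an assumption for the sketch, not necessarily what ZODB.utils uses:

import sys

if sys.version_info[0] < 3:
    def as_bytes(obj):
        "Convert obj into bytes"
        return str(obj)
else:
    def as_bytes(obj):
        if isinstance(obj, bytes):
            # invoking str on a bytes object gives its repr()
            return obj
        return str(obj).encode('ascii')   # assumed encoding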