Commit 0c8c1b52 authored by Jim Fulton's avatar Jim Fulton

Refactored most of the blob-storage tests to be usable with different

blob-storage implementations.
parent 59425057
......@@ -52,6 +52,7 @@ entry_points = """
zeopasswd = ZEO.zeopasswd:main
mkzeoinst = ZEO.mkzeoinst:main
zeoctl = ZEO.zeoctl:main
remove-old-zeo-cached-blobs = ZEO.ClientStorage:check_blob_size_script
"""
scripts = []
......
##############################################################################
#
# Copyright (c) 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Connection support for Blobs tests
==================================
......@@ -30,13 +16,8 @@ We also need a database with a blob supporting storage. (We're going to use
FileStorage rather than MappingStorage here because we will want ``loadBefore``
for one of our examples.)
>>> import ZODB.FileStorage
>>> from ZODB.blob import BlobStorage
>>> blob_storage = create_storage()
>>> from ZODB.DB import DB
>>> base_storage = ZODB.FileStorage.FileStorage(
... 'BlobTests.fs', create=True)
>>> blob_dir = 'blobs'
>>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(blob_storage)
Putting a Blob into a Connection works like every other object:
......
......@@ -17,25 +17,13 @@ Import/export support for blob data
Set up:
>>> from ZODB.FileStorage import FileStorage
>>> from ZODB.blob import Blob, BlobStorage
>>> from ZODB.DB import DB
>>> import ZODB.blob, transaction
>>> from persistent.mapping import PersistentMapping
>>> import shutil
>>> import transaction
>>> storagefile1 = 'Data.fs.1'
>>> blob_dir1 = 'blobs1'
>>> storagefile2 = 'Data.fs.2'
>>> blob_dir2 = 'blobs2'
We need a database with an undoing blob-supporting storage:
>>> base_storage1 = FileStorage(storagefile1)
>>> blob_storage1 = BlobStorage(blob_dir1, base_storage1)
>>> base_storage2 = FileStorage(storagefile2)
>>> blob_storage2 = BlobStorage(blob_dir2, base_storage2)
>>> database1 = DB(blob_storage1)
>>> database2 = DB(blob_storage2)
>>> database1 = ZODB.DB(create_storage('1'))
>>> database2 = ZODB.DB(create_storage('2'))
Create our root object for database1:
......@@ -46,12 +34,11 @@ Put a couple blob objects in our database1 and on the filesystem:
>>> import time, os
>>> nothing = transaction.begin()
>>> tid = blob_storage1._tid
>>> data1 = 'x'*100000
>>> blob1 = Blob()
>>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write(data1)
>>> data2 = 'y'*100000
>>> blob2 = Blob()
>>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write(data2)
>>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
>>> root1['blobdata'] = d
......@@ -85,17 +72,7 @@ Make sure our data exists:
True
>>> transaction.get().abort()
Clean up our blob directory:
>>> base_storage1.close()
>>> base_storage2.close()
>>> import ZODB.blob
>>> ZODB.blob.remove_committed_dir(blob_dir1)
>>> ZODB.blob.remove_committed_dir(blob_dir2)
>>> os.unlink(exportfile)
>>> os.unlink(storagefile1)
>>> os.unlink(storagefile1+".index")
>>> os.unlink(storagefile1+".tmp")
>>> os.unlink(storagefile2)
>>> os.unlink(storagefile2+".index")
>>> os.unlink(storagefile2+".tmp")
.. cleanup
>>> database1.close()
>>> database2.close()
......@@ -17,20 +17,16 @@ Transaction support for Blobs
We need a database with a blob supporting storage::
>>> from ZODB.MappingStorage import MappingStorage
>>> from ZODB.blob import Blob, BlobStorage
>>> from ZODB.DB import DB
>>> import transaction
>>> base_storage = MappingStorage("test")
>>> import ZODB.blob, transaction
>>> blob_dir = 'blobs'
>>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(blob_storage)
>>> blob_storage = create_storage(blob_dir=blob_dir)
>>> database = ZODB.DB(blob_storage)
>>> connection1 = database.open()
>>> root1 = connection1.root()
Putting a Blob into a Connection works like any other Persistent object::
>>> blob1 = Blob()
>>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write('this is blob 1')
>>> root1['blob1'] = blob1
>>> 'blob1' in root1
......@@ -130,7 +126,7 @@ when we start)::
We can open more than one blob object during the course of a single
transaction::
>>> blob2 = Blob()
>>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write('this is blob 3')
>>> root2['blob2'] = blob2
>>> transaction.commit()
......@@ -189,16 +185,6 @@ connections::
>>> root4['blob1'].open('r').read()
'this is blob 1woot!this is from connection 3'
BlobStorage's implementation of getSize() does not include the blob data and
only returns what the underlying storages do. (We need to ensure the last
number to be an int, otherwise it will be a long on 32-bit platforms and an
int on 64-bit)::
>>> underlying_size = base_storage.getSize()
>>> blob_size = blob_storage.getSize()
>>> int(blob_size - underlying_size)
0
You can't commit a transaction while blob files are open:
>>> f = root3['blob1'].open('w')
......@@ -227,7 +213,7 @@ We do support optimistic savepoints:
>>> connection5 = database.open()
>>> root5 = connection5.root()
>>> blob = Blob()
>>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.")
>>> blob_fh.close()
......@@ -297,7 +283,7 @@ file that can be opened.
>>> connection6 = database.open()
>>> root6 = connection6.root()
>>> blob = Blob()
>>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.")
>>> blob_fh.close()
......@@ -330,7 +316,7 @@ and doesn't prevent us from opening the blob for writing:
An exception is raised if we call committed on a blob that has
uncommitted changes:
>>> blob = Blob()
>>> blob = ZODB.blob.Blob()
>>> blob.committed()
Traceback (most recent call last):
...
......@@ -375,55 +361,55 @@ You can't open a committed blob file for writing:
...
IOError: ...
tpc_abort with dirty data
-------------------------
When `tpc_abort` is called during the first commit phase we need to be able to
clean up dirty files:
>>> class DummyBaseStorage(object):
... def tpc_abort(self):
... pass
>>> base_storage = DummyBaseStorage()
>>> blob_dir2 = 'blobs2'
>>> blob_storage2 = BlobStorage(blob_dir2, base_storage)
>>> committed_blob_dir = blob_storage2.fshelper.getPathForOID(0)
>>> os.makedirs(committed_blob_dir)
>>> committed_blob_file = blob_storage2.fshelper.getBlobFilename(0, 0)
>>> open(os.path.join(committed_blob_file), 'w').write('foo')
>>> os.path.exists(committed_blob_file)
True
tpc_abort
---------
Now, telling the storage that Blob 0 and Blob 1 (both with serial 0) are dirty
will: remove the committed file for Blob 0 and ignore the fact that Blob 1 is
set to dirty but doesn't actually have an existing file:
If a transaction is aborted in the middle of 2-phase commit, any data
stored are discarded.
>>> blob_storage2.dirty_oids = [(0, 0), (1, 0)]
>>> blob_storage2.tpc_abort()
>>> os.path.exists(committed_blob_file)
False
>>> olddata, oldserial = blob_storage.load(blob._p_oid, '')
>>> t = transaction.get()
>>> blob_storage.tpc_begin(t)
>>> open('blobfile', 'w').write('This data should go away')
>>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile',
... '', t)
>>> new_oid = blob_storage.new_oid()
>>> open('blobfile2', 'w').write('This data should go away too')
>>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2',
... '', t)
>>> blob_storage.tpc_abort(t)
Now, the serial for the existing blob should be the same:
Note: This is a counter measure against regression of bug #126007.
>>> blob_storage.load(blob._p_oid, '') == (olddata, oldserial)
True
`getSize` iterates over the existing blob files in the blob directory and adds
up their size. The blob directory sometimes contains temporary files that the
getSize function needs to ignore:
And we shouldn't be able to read the data that we saved:
>>> garbage_file = os.path.join(blob_dir, 'garbage')
>>> open(garbage_file, 'w').write('garbage')
>>> int(blob_storage.getSize())
2673
>>> blob_storage.loadBlob(blob._p_oid, s1)
Traceback (most recent call last):
...
POSKeyError: 'No blob file'
Note: This is a counter measure against regression of bug #12991.
Of course the old data should be unaffected:
Teardown
--------
>>> open(blob_storage.loadBlob(blob._p_oid, oldserial)).read()
"I'm a happy blob."
Similarly, the new object wasn't added to the storage:
>>> blob_storage.load(new_oid, '')
Traceback (most recent call last):
...
POSKeyError: 0x06
>>> blob_storage.loadBlob(blob._p_oid, s2)
Traceback (most recent call last):
...
POSKeyError: 'No blob file'
We don't need the storage directory and databases anymore::
.. clean up
>>> tm1.abort()
>>> tm2.abort()
>>> database.close()
>>> rmtree(blob_dir)
>>> rmtree(blob_dir2)
......@@ -39,10 +39,10 @@ import ZConfig
import ZODB.blob
import ZODB.interfaces
import ZODB.tests.IteratorStorage
import ZODB.tests.StorageTestBase
import ZODB.tests.util
import zope.testing.renormalizing
def new_time():
"""Create a _new_ time stamp.
......@@ -95,8 +95,13 @@ class ZODBBlobConfigTest(ConfigTestBase):
</zodb>
""")
class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
def setUp(self):
ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
self._storage = self.create_storage()
class BlobCloneTests(ZODB.tests.util.TestCase):
class BlobCloneTests(BlobTestBase):
def testDeepCopyCanInvalidate(self):
"""
......@@ -104,9 +109,7 @@ class BlobCloneTests(ZODB.tests.util.TestCase):
readers and writers values in cloned objects (see
http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
"""
base_storage = FileStorage('Data.fs')
blob_storage = BlobStorage('blobs', base_storage)
database = DB(blob_storage)
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
......@@ -129,12 +132,10 @@ class BlobCloneTests(ZODB.tests.util.TestCase):
database.close()
class BlobUndoTests(ZODB.tests.util.TestCase):
class BlobUndoTests(BlobTestBase):
def testUndoWithoutPreviousVersion(self):
base_storage = FileStorage('Data.fs')
blob_storage = BlobStorage('blobs', base_storage)
database = DB(blob_storage)
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
......@@ -149,9 +150,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
database.close()
def testUndo(self):
base_storage = FileStorage('Data.fs')
blob_storage = BlobStorage('blobs', base_storage)
database = DB(blob_storage)
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
......@@ -173,9 +172,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
database.close()
def testUndoAfterConsumption(self):
base_storage = FileStorage('Data.fs')
blob_storage = BlobStorage('blobs', base_storage)
database = DB(blob_storage)
database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
......@@ -199,9 +196,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
database.close()
def testRedo(self):
base_storage = FileStorage('Data.fs')
blob_storage = BlobStorage('bobs', base_storage)
database = DB(blob_storage)
database = DB(self._storage)
connection = database.open()
root = connection.root()
blob = Blob()
......@@ -221,8 +216,6 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
self.assertEqual(blob.open('r').read(), 'this is state 1')
serial = base64.encodestring(blob_storage._tid)
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
......@@ -231,9 +224,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
database.close()
def testRedoOfCreation(self):
base_storage = FileStorage('Data.fs')
blob_storage = BlobStorage('blobs', base_storage)
database = DB(blob_storage)
database = DB(self._storage)
connection = database.open()
root = connection.root()
blob = Blob()
......@@ -256,20 +247,16 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
database.close()
class RecoveryBlobStorage(ZODB.tests.util.TestCase,
class RecoveryBlobStorage(BlobTestBase,
ZODB.tests.IteratorStorage.IteratorDeepCompare):
def setUp(self):
ZODB.tests.util.TestCase.setUp(self)
self._storage = BlobStorage(
'src_blobs', ZODB.FileStorage.FileStorage("Source.fs", create=True))
self._dst = BlobStorage(
'dest_blobs', ZODB.FileStorage.FileStorage("Dest.fs", create=True))
BlobTestBase.setUp(self)
self._dst = self.create_storage('dest')
def tearDown(self):
self._storage.close()
self._dst.close()
ZODB.tests.util.TestCase.tearDown(self)
BlobTestBase.tearDown(self)
# Requires a setUp() that creates a self._dst destination storage
def testSimpleBlobRecovery(self):
......@@ -299,7 +286,6 @@ class RecoveryBlobStorage(ZODB.tests.util.TestCase,
def gc_blob_removes_uncommitted_data():
"""
>>> from ZODB.blob import Blob
>>> blob = Blob()
>>> blob.open('w').write('x')
>>> fname = blob._p_blob_uncommitted
......@@ -323,19 +309,14 @@ def commit_from_wrong_partition():
>>> os_rename = os.rename
>>> os.rename = fail
>>> import logging, sys
>>> import logging
>>> logger = logging.getLogger('ZODB.blob.copied')
>>> handler = logging.StreamHandler(sys.stdout)
>>> logger.propagate = False
>>> logger.setLevel(logging.DEBUG)
>>> logger.addHandler(handler)
>>> import transaction
>>> from ZODB.MappingStorage import MappingStorage
>>> from ZODB.blob import BlobStorage
>>> from ZODB.DB import DB
>>> base_storage = MappingStorage("test")
>>> blob_storage = BlobStorage('blobs', base_storage)
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -378,13 +359,10 @@ def packing_with_uncommitted_data_non_undoing():
temporary directory that is ignored while packing.
>>> import transaction
>>> from ZODB.MappingStorage import MappingStorage
>>> from ZODB.blob import BlobStorage
>>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
>>> base_storage = MappingStorage("test")
>>> blob_storage = BlobStorage('blobs', base_storage)
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -409,14 +387,9 @@ def packing_with_uncommitted_data_undoing():
blob_directory and confused our packing strategy. We now use a separate
temporary directory that is ignored while packing.
>>> import transaction
>>> from ZODB.FileStorage.FileStorage import FileStorage
>>> from ZODB.blob import BlobStorage
>>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
>>> base_storage = FileStorage('Data.fs')
>>> blob_storage = BlobStorage('blobs', base_storage)
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -438,12 +411,7 @@ def secure_blob_directory():
This is a test for secure creation and verification of secure settings of
blob directories.
>>> from ZODB.FileStorage.FileStorage import FileStorage
>>> from ZODB.blob import BlobStorage
>>> import os.path
>>> base_storage = FileStorage('Data.fs')
>>> blob_storage = BlobStorage('blobs', base_storage)
>>> blob_storage = create_storage(blob_dir='blobs')
Two directories are created:
......@@ -493,14 +461,7 @@ def loadblob_tmpstore():
First, let's setup a regular database and store a blob:
>>> import transaction
>>> from ZODB.FileStorage.FileStorage import FileStorage
>>> from ZODB.blob import BlobStorage
>>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
>>> base_storage = FileStorage('Data.fs')
>>> blob_storage = BlobStorage('blobs', base_storage)
>>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -533,15 +494,14 @@ def loadblob_tmpstore():
def is_blob_record():
r"""
>>> fs = FileStorage('Data.fs')
>>> bs = ZODB.blob.BlobStorage('blobs', fs)
>>> bs = create_storage()
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> transaction.commit()
>>> ZODB.blob.is_blob_record(fs.load(ZODB.utils.p64(0), '')[0])
>>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(0), '')[0])
False
>>> ZODB.blob.is_blob_record(fs.load(ZODB.utils.p64(1), '')[0])
>>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(1), '')[0])
True
An invalid pickle yields a false value:
......@@ -558,8 +518,7 @@ def is_blob_record():
def do_not_depend_on_cwd():
"""
>>> from ZODB.MappingStorage import MappingStorage
>>> bs = ZODB.blob.BlobStorage('blobs', MappingStorage())
>>> bs = create_storage()
>>> here = os.getcwd()
>>> os.mkdir('evil')
>>> os.chdir('evil')
......@@ -578,12 +537,67 @@ def setUp(test):
ZODB.tests.util.setUp(test)
test.globs['rmtree'] = zope.testing.setupstack.rmtree
def setUpBlobAdaptedFileStorage(test):
setUp(test)
def create_storage(name='data', blob_dir=None):
if blob_dir is None:
blob_dir = '%s.bobs' % name
return ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
test.globs['create_storage'] = create_storage
def storage_reusable_suite(prefix, factory):
"""Return a test suite for a generic IBlobStorage.
Pass a factory taking a name and a blob directory name.
"""
def setup(test):
setUp(test)
def create_storage(name='data', blob_dir=None):
if blob_dir is None:
blob_dir = '%s.bobs' % name
return factory(name, blob_dir)
test.globs['create_storage'] = create_storage
suite = unittest.TestSuite()
suite.addTest(doctest.DocFileSuite(
"blob_connection.txt", "blob_importexport.txt",
"blob_transaction.txt",
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
optionflags=doctest.ELLIPSIS,
))
suite.addTest(doctest.DocTestSuite(
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
checker = zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
]),
))
def create_storage(self, name='data', blob_dir=None):
if blob_dir is None:
blob_dir = '%s.bobs' % name
return factory(name, blob_dir)
for class_ in (BlobCloneTests, BlobUndoTests, RecoveryBlobStorage):
new_class = class_.__class__(
prefix+class_.__name__, (class_, ),
dict(create_storage=create_storage),
)
suite.addTest(unittest.makeSuite(new_class))
return suite
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
suite.addTest(doctest.DocFileSuite(
"blob_basic.txt", "blob_connection.txt", "blob_transaction.txt",
"blob_packing.txt", "blob_importexport.txt", "blob_consume.txt",
"blob_basic.txt",
"blob_packing.txt", "blob_consume.txt",
"blob_tempdir.txt",
setUp=setUp,
tearDown=zope.testing.setupstack.tearDown,
......@@ -600,17 +614,11 @@ def test_suite():
(re.compile(r'\S+/((old|bushy|lawn)/\S+/foo[23456]?)'), r'\1'),
]),
))
suite.addTest(doctest.DocTestSuite(
setUp=setUp,
tearDown=zope.testing.setupstack.tearDown,
checker = zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
]),
suite.addTest(storage_reusable_suite(
'BlobAdaptedFileStorage',
lambda name, blob_dir:
ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
))
suite.addTest(unittest.makeSuite(BlobCloneTests))
suite.addTest(unittest.makeSuite(BlobUndoTests))
suite.addTest(unittest.makeSuite(RecoveryBlobStorage))
return suite
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment