nexedi / ZEO

Commit 0c8c1b52
authored Dec 05, 2008 by Jim Fulton
parent 59425057

Refactored most of the blob-storage tests to be usable with different
blob-storage implementations.

Showing 5 changed files with 147 additions and 194 deletions:
setup.py                               +1    -0
src/ZODB/tests/blob_connection.txt     +1   -20
src/ZODB/tests/blob_importexport.txt   +9   -32
src/ZODB/tests/blob_transaction.txt   +47   -61
src/ZODB/tests/testblob.py            +89   -81
setup.py
...
@@ -52,6 +52,7 @@ entry_points = """
     zeopasswd = ZEO.zeopasswd:main
     mkzeoinst = ZEO.mkzeoinst:main
     zeoctl = ZEO.zeoctl:main
+    remove-old-zeo-cached-blobs = ZEO.ClientStorage:check_blob_size_script
     """
 scripts = []
...
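The only non-test change is a new console-script entry point. As a rough illustration of how such an entry is consumed (a minimal sketch of the general setuptools mechanism, not ZEO's actual setup.py; the [console_scripts] section header is assumed, since it sits above the lines shown in the hunk):

# Minimal sketch of a setuptools console_scripts entry point (assumption:
# this mirrors the general mechanism, not ZEO's full setup.py).
from setuptools import setup

setup(
    name='example-zeo-tools',   # hypothetical package name
    entry_points="""
    [console_scripts]
    remove-old-zeo-cached-blobs = ZEO.ClientStorage:check_blob_size_script
    """,
)
# After installation, running `remove-old-zeo-cached-blobs` imports
# ZEO.ClientStorage and calls check_blob_size_script(), which is expected
# to read its own arguments from sys.argv.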
src/ZODB/tests/blob_connection.txt
-##############################################################################
-#
-# Copyright (c) 2005 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
 Connection support for Blobs tests
 ==================================
...
@@ -30,13 +16,8 @@ We also need a database with a blob supporting storage. (We're going to use
 FileStorage rather than MappingStorage here because we will want ``loadBefore``
 for one of our examples.)

-    >>> import ZODB.FileStorage
-    >>> from ZODB.blob import BlobStorage
+    >>> blob_storage = create_storage()
     >>> from ZODB.DB import DB
-    >>> base_storage = ZODB.FileStorage.FileStorage(
-    ...     'BlobTests.fs', create=True)
-    >>> blob_dir = 'blobs'
-    >>> blob_storage = BlobStorage(blob_dir, base_storage)
     >>> database = DB(blob_storage)

 Putting a Blob into a Connection works like every other object:
...
src/ZODB/tests/blob_importexport.txt
...
@@ -17,25 +17,13 @@ Import/export support for blob data
 Set up:

-    >>> from ZODB.FileStorage import FileStorage
-    >>> from ZODB.blob import Blob, BlobStorage
-    >>> from ZODB.DB import DB
+    >>> import ZODB.blob, transaction
     >>> from persistent.mapping import PersistentMapping
-    >>> import shutil
-    >>> import transaction
-    >>> storagefile1 = 'Data.fs.1'
-    >>> blob_dir1 = 'blobs1'
-    >>> storagefile2 = 'Data.fs.2'
-    >>> blob_dir2 = 'blobs2'

 We need an database with an undoing blob supporting storage:

-    >>> base_storage1 = FileStorage(storagefile1)
-    >>> blob_storage1 = BlobStorage(blob_dir1, base_storage1)
-    >>> base_storage2 = FileStorage(storagefile2)
-    >>> blob_storage2 = BlobStorage(blob_dir2, base_storage2)
-    >>> database1 = DB(blob_storage1)
-    >>> database2 = DB(blob_storage2)
+    >>> database1 = ZODB.DB(create_storage('1'))
+    >>> database2 = ZODB.DB(create_storage('2'))

 Create our root object for database1:
...
@@ -46,12 +34,11 @@ Put a couple blob objects in our database1 and on the filesystem:
     >>> import time, os
     >>> nothing = transaction.begin()
-    >>> tid = blob_storage1._tid
     >>> data1 = 'x'*100000
-    >>> blob1 = Blob()
+    >>> blob1 = ZODB.blob.Blob()
     >>> blob1.open('w').write(data1)
     >>> data2 = 'y'*100000
-    >>> blob2 = Blob()
+    >>> blob2 = ZODB.blob.Blob()
     >>> blob2.open('w').write(data2)
     >>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
     >>> root1['blobdata'] = d
...
@@ -85,17 +72,7 @@ Make sure our data exists:
     True
     >>> transaction.get().abort()

-Clean up our blob directory:
+.. cleanup

-    >>> base_storage1.close()
-    >>> base_storage2.close()
+    >>> database1.close()
+    >>> database2.close()
-    >>> import ZODB.blob
-    >>> ZODB.blob.remove_committed_dir(blob_dir1)
-    >>> ZODB.blob.remove_committed_dir(blob_dir2)
-    >>> os.unlink(exportfile)
-    >>> os.unlink(storagefile1)
-    >>> os.unlink(storagefile1+".index")
-    >>> os.unlink(storagefile1+".tmp")
-    >>> os.unlink(storagefile2)
-    >>> os.unlink(storagefile2+".index")
-    >>> os.unlink(storagefile2+".tmp")
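For orientation, the elided middle of this doctest exercises ZODB's export/import machinery between the two databases. A rough sketch of that round trip, assuming the standard Connection.exportFile/importFile API (the file and variable names below are illustrative, not the doctest's own):

# Sketch of an export/import round trip between two blob-capable databases
# (assumption: standard ZODB Connection.exportFile/importFile; names are
# illustrative and do not come from the elided doctest).
import transaction

connection1 = database1.open()
connection2 = database2.open()

blobdata = connection1.root()['blobdata']
export_file = open('export.zexp', 'w+b')          # hypothetical file name
connection1.exportFile(blobdata._p_oid, export_file)

export_file.seek(0)
copy = connection2.importFile(export_file)        # returns the imported copy
connection2.root()['blobdata'] = copy
transaction.commit()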
src/ZODB/tests/blob_transaction.txt
...
@@ -17,20 +17,16 @@ Transaction support for Blobs
 We need a database with a blob supporting storage::

-    >>> from ZODB.MappingStorage import MappingStorage
-    >>> from ZODB.blob import Blob, BlobStorage
-    >>> from ZODB.DB import DB
-    >>> import transaction
-    >>> base_storage = MappingStorage("test")
+    >>> import ZODB.blob, transaction
     >>> blob_dir = 'blobs'
-    >>> blob_storage = BlobStorage(blob_dir, base_storage)
+    >>> blob_storage = create_storage(blob_dir=blob_dir)
-    >>> database = DB(blob_storage)
+    >>> database = ZODB.DB(blob_storage)
     >>> connection1 = database.open()
     >>> root1 = connection1.root()

 Putting a Blob into a Connection works like any other Persistent object::

-    >>> blob1 = Blob()
+    >>> blob1 = ZODB.blob.Blob()
     >>> blob1.open('w').write('this is blob 1')
     >>> root1['blob1'] = blob1
     >>> 'blob1' in root1
...
@@ -130,7 +126,7 @@ when we start)::
 We can open more than one blob object during the course of a single
 transaction::

-    >>> blob2 = Blob()
+    >>> blob2 = ZODB.blob.Blob()
     >>> blob2.open('w').write('this is blob 3')
     >>> root2['blob2'] = blob2
     >>> transaction.commit()
...
@@ -189,16 +185,6 @@ connections::
     >>> root4['blob1'].open('r').read()
     'this is blob 1woot!this is from connection 3'

-BlobStorages implementation of getSize() does not include the blob data and
-only returns what the underlying storages do. (We need to ensure the last
-number to be an int, otherwise it will be a long on 32-bit platforms and an
-int on 64-bit)::
-
-    >>> underlying_size = base_storage.getSize()
-    >>> blob_size = blob_storage.getSize()
-    >>> int(blob_size - underlying_size)
-    0
-
 You can't commit a transaction while blob files are open:

     >>> f = root3['blob1'].open('w')
...
@@ -227,7 +213,7 @@ We do support optimistic savepoints:
     >>> connection5 = database.open()
     >>> root5 = connection5.root()
-    >>> blob = Blob()
+    >>> blob = ZODB.blob.Blob()
     >>> blob_fh = blob.open("w")
     >>> blob_fh.write("I'm a happy blob.")
     >>> blob_fh.close()
...
@@ -297,7 +283,7 @@ file that can be opened.
     >>> connection6 = database.open()
     >>> root6 = connection6.root()
-    >>> blob = Blob()
+    >>> blob = ZODB.blob.Blob()
     >>> blob_fh = blob.open("w")
     >>> blob_fh.write("I'm a happy blob.")
     >>> blob_fh.close()
...
@@ -330,7 +316,7 @@ and doesn't prevent us from opening the blob for writing:
 An exception is raised if we call committed on a blob that has
 uncommitted changes:

-    >>> blob = Blob()
+    >>> blob = ZODB.blob.Blob()
     >>> blob.committed()
     Traceback (most recent call last):
     ...
...
@@ -375,55 +361,55 @@ You can't open a committed blob file for writing:
     ...
     IOError: ...

-tpc_abort with dirty data
--------------------------
-
-When `tpc_abort` is called during the first commit phase we need to be able to
-clean up dirty files:
-
-    >>> class DummyBaseStorage(object):
-    ...     def tpc_abort(self):
-    ...         pass
-    >>> base_storage = DummyBaseStorage()
-    >>> blob_dir2 = 'blobs2'
-    >>> blob_storage2 = BlobStorage(blob_dir2, base_storage)
-    >>> committed_blob_dir = blob_storage2.fshelper.getPathForOID(0)
-    >>> os.makedirs(committed_blob_dir)
-    >>> committed_blob_file = blob_storage2.fshelper.getBlobFilename(0, 0)
-    >>> open(os.path.join(committed_blob_file), 'w').write('foo')
-    >>> os.path.exists(committed_blob_file)
-    True
-
-Now, telling the storage that Blob 0 and Blob 1 (both with serial 0) are dirty
-will: remove the committed file for Blob 0 and ignore the fact that Blob 1 is
-set to dirty but doesn't actually have an existing file:
-
-    >>> blob_storage2.dirty_oids = [(0, 0), (1, 0)]
-    >>> blob_storage2.tpc_abort()
-    >>> os.path.exists(committed_blob_file)
-    False
-
-Note: This is a counter measure against regression of bug #126007.
-
-`getSize` iterates over the existing blob files in the blob directory and adds
-up their size. The blob directory sometimes contains temporary files that the
-getSize function needs to ignore:
-
-    >>> garbage_file = os.path.join(blob_dir, 'garbage')
-    >>> open(garbage_file, 'w').write('garbage')
-    >>> int(blob_storage.getSize())
-    2673
-
-Note: This is a counter measer against regression of bug #12991.
-
-Teardown
---------
-
-We don't need the storage directory and databases anymore::
+tpc_abort
+---------
+
+If a transaction is aborted in the middle of 2-phase commit, any data
+stored are discarded.
+
+    >>> olddata, oldserial = blob_storage.load(blob._p_oid, '')
+    >>> t = transaction.get()
+    >>> blob_storage.tpc_begin(t)
+    >>> open('blobfile', 'w').write('This data should go away')
+    >>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile',
+    ...                             '', t)
+    >>> new_oid = blob_storage.new_oid()
+    >>> open('blobfile2', 'w').write('This data should go away too')
+    >>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2',
+    ...                             '', t)
+    >>> blob_storage.tpc_abort(t)
+
+Now, the serial for the existing blob should be the same:
+
+    >>> blob_storage.load(blob._p_oid, '') == (olddata, oldserial)
+    True
+
+And we shouldn't be able to read the data that we saved:
+
+    >>> blob_storage.loadBlob(blob._p_oid, s1)
+    Traceback (most recent call last):
+    ...
+    POSKeyError: 'No blob file'
+
+Of course the old data should be unaffected:
+
+    >>> open(blob_storage.loadBlob(blob._p_oid, oldserial)).read()
+    "I'm a happy blob."
+
+Similarly, the new object wasn't added to the storage:
+
+    >>> blob_storage.load(new_oid, '')
+    Traceback (most recent call last):
+    ...
+    POSKeyError: 0x06
+
+    >>> blob_storage.loadBlob(blob._p_oid, s2)
+    Traceback (most recent call last):
+    ...
+    POSKeyError: 'No blob file'
+
+.. clean up

     >>> tm1.abort()
     >>> tm2.abort()
     >>> database.close()
-    >>> rmtree(blob_dir)
-    >>> rmtree(blob_dir2)
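The replacement test drives the storage-level API directly and then aborts. For contrast, the committing path runs the same calls but completes both phases; a minimal sketch, assuming the standard ZODB storage methods tpc_vote and tpc_finish (which the diff itself does not show) and the same blob_storage/blob objects as the doctest:

# Committing counterpart to the aborted sequence above (sketch only).
# Assumes blob_storage, blob and transaction from the surrounding doctest;
# tpc_vote/tpc_finish are the standard ZODB storage calls not shown here.
olddata, oldserial = blob_storage.load(blob._p_oid, '')
t = transaction.get()
blob_storage.tpc_begin(t)
open('blobfile3', 'w').write('this data should survive')   # illustrative file
blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile3', '', t)
blob_storage.tpc_vote(t)
blob_storage.tpc_finish(t)

# After tpc_finish the record is durable: the serial changes and the new
# blob revision can be loaded.
newdata, newserial = blob_storage.load(blob._p_oid, '')
assert newserial != oldserial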
src/ZODB/tests/testblob.py
...
@@ -39,10 +39,10 @@ import ZConfig
 import ZODB.blob
 import ZODB.interfaces
 import ZODB.tests.IteratorStorage
+import ZODB.tests.StorageTestBase
 import ZODB.tests.util
 import zope.testing.renormalizing

 def new_time():
     """Create a _new_ time stamp.
...
@@ -95,8 +95,13 @@ class ZODBBlobConfigTest(ConfigTestBase):
             </zodb>
             """)

+class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
+
+    def setUp(self):
+        ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
+        self._storage = self.create_storage()
+
-class BlobCloneTests(ZODB.tests.util.TestCase):
+class BlobCloneTests(BlobTestBase):

     def testDeepCopyCanInvalidate(self):
         """
...
@@ -104,9 +109,7 @@ class BlobCloneTests(ZODB.tests.util.TestCase):
         readers and writers values in cloned objects (see
         http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
         """
-        base_storage = FileStorage('Data.fs')
-        blob_storage = BlobStorage('blobs', base_storage)
-        database = DB(blob_storage)
+        database = DB(self._storage)
         connection = database.open()
         root = connection.root()
         transaction.begin()
...
@@ -129,12 +132,10 @@ class BlobCloneTests(ZODB.tests.util.TestCase):
         database.close()

-class BlobUndoTests(ZODB.tests.util.TestCase):
+class BlobUndoTests(BlobTestBase):

     def testUndoWithoutPreviousVersion(self):
-        base_storage = FileStorage('Data.fs')
-        blob_storage = BlobStorage('blobs', base_storage)
-        database = DB(blob_storage)
+        database = DB(self._storage)
         connection = database.open()
         root = connection.root()
         transaction.begin()
...
@@ -149,9 +150,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
         database.close()

     def testUndo(self):
-        base_storage = FileStorage('Data.fs')
-        blob_storage = BlobStorage('blobs', base_storage)
-        database = DB(blob_storage)
+        database = DB(self._storage)
         connection = database.open()
         root = connection.root()
         transaction.begin()
...
@@ -173,9 +172,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
         database.close()

     def testUndoAfterConsumption(self):
-        base_storage = FileStorage('Data.fs')
-        blob_storage = BlobStorage('blobs', base_storage)
-        database = DB(blob_storage)
+        database = DB(self._storage)
         connection = database.open()
         root = connection.root()
         transaction.begin()
...
@@ -199,9 +196,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
         database.close()

     def testRedo(self):
-        base_storage = FileStorage('Data.fs')
-        blob_storage = BlobStorage('bobs', base_storage)
-        database = DB(blob_storage)
+        database = DB(self._storage)
         connection = database.open()
         root = connection.root()
         blob = Blob()
...
@@ -221,8 +216,6 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
         self.assertEqual(blob.open('r').read(), 'this is state 1')

-        serial = base64.encodestring(blob_storage._tid)
-
         database.undo(database.undoLog(0, 1)[0]['id'])
         transaction.commit()
...
@@ -231,9 +224,7 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
         database.close()

     def testRedoOfCreation(self):
-        base_storage = FileStorage('Data.fs')
-        blob_storage = BlobStorage('blobs', base_storage)
-        database = DB(blob_storage)
+        database = DB(self._storage)
         connection = database.open()
         root = connection.root()
         blob = Blob()
...
@@ -256,20 +247,16 @@ class BlobUndoTests(ZODB.tests.util.TestCase):
         database.close()

-class RecoveryBlobStorage(ZODB.tests.util.TestCase,
+class RecoveryBlobStorage(BlobTestBase,
                           ZODB.tests.IteratorStorage.IteratorDeepCompare):

     def setUp(self):
-        ZODB.tests.util.TestCase.setUp(self)
-        self._storage = BlobStorage(
-            'src_blobs', ZODB.FileStorage.FileStorage("Source.fs", create=True))
-        self._dst = BlobStorage(
-            'dest_blobs', ZODB.FileStorage.FileStorage("Dest.fs", create=True))
+        BlobTestBase.setUp(self)
+        self._dst = self.create_storage('dest')

     def tearDown(self):
-        self._storage.close()
         self._dst.close()
-        ZODB.tests.util.TestCase.tearDown(self)
+        BlobTestBase.tearDown(self)

     # Requires a setUp() that creates a self._dst destination storage
     def testSimpleBlobRecovery(self):
...
@@ -299,7 +286,6 @@ class RecoveryBlobStorage(ZODB.tests.util.TestCase,

 def gc_blob_removes_uncommitted_data():
     """
-    >>> from ZODB.blob import Blob
     >>> blob = Blob()
     >>> blob.open('w').write('x')
     >>> fname = blob._p_blob_uncommitted
...
@@ -323,19 +309,14 @@ def commit_from_wrong_partition():
     >>> os_rename = os.rename
     >>> os.rename = fail
-    >>> import logging, sys
+    >>> import logging
     >>> logger = logging.getLogger('ZODB.blob.copied')
     >>> handler = logging.StreamHandler(sys.stdout)
     >>> logger.propagate = False
     >>> logger.setLevel(logging.DEBUG)
     >>> logger.addHandler(handler)
-    >>> import transaction
-    >>> from ZODB.MappingStorage import MappingStorage
-    >>> from ZODB.blob import BlobStorage
-    >>> from ZODB.DB import DB
-    >>> base_storage = MappingStorage("test")
-    >>> blob_storage = BlobStorage('blobs', base_storage)
+    >>> blob_storage = create_storage()
     >>> database = DB(blob_storage)
     >>> connection = database.open()
     >>> root = connection.root()
...
@@ -378,13 +359,10 @@ def packing_with_uncommitted_data_non_undoing():
     temporary directory that is ignored while packing.

     >>> import transaction
-    >>> from ZODB.MappingStorage import MappingStorage
-    >>> from ZODB.blob import BlobStorage
     >>> from ZODB.DB import DB
     >>> from ZODB.serialize import referencesf
-    >>> base_storage = MappingStorage("test")
-    >>> blob_storage = BlobStorage('blobs', base_storage)
+    >>> blob_storage = create_storage()
     >>> database = DB(blob_storage)
     >>> connection = database.open()
     >>> root = connection.root()
...
@@ -409,14 +387,9 @@ def packing_with_uncommitted_data_undoing():
     blob_directory and confused our packing strategy. We now use a separate
     temporary directory that is ignored while packing.

-    >>> import transaction
-    >>> from ZODB.FileStorage.FileStorage import FileStorage
-    >>> from ZODB.blob import BlobStorage
-    >>> from ZODB.DB import DB
     >>> from ZODB.serialize import referencesf
-    >>> base_storage = FileStorage('Data.fs')
-    >>> blob_storage = BlobStorage('blobs', base_storage)
+    >>> blob_storage = create_storage()
     >>> database = DB(blob_storage)
     >>> connection = database.open()
     >>> root = connection.root()
...
@@ -438,12 +411,7 @@ def secure_blob_directory():
     This is a test for secure creation and verification of secure settings of
     blob directories.

-    >>> from ZODB.FileStorage.FileStorage import FileStorage
-    >>> from ZODB.blob import BlobStorage
-    >>> import os.path
-    >>> base_storage = FileStorage('Data.fs')
-    >>> blob_storage = BlobStorage('blobs', base_storage)
+    >>> blob_storage = create_storage(blob_dir='blobs')

     Two directories are created:
...
@@ -493,14 +461,7 @@ def loadblob_tmpstore():
     First, let's setup a regular database and store a blob:

-    >>> import transaction
-    >>> from ZODB.FileStorage.FileStorage import FileStorage
-    >>> from ZODB.blob import BlobStorage
-    >>> from ZODB.DB import DB
-    >>> from ZODB.serialize import referencesf
-    >>> base_storage = FileStorage('Data.fs')
-    >>> blob_storage = BlobStorage('blobs', base_storage)
+    >>> blob_storage = create_storage()
     >>> database = DB(blob_storage)
     >>> connection = database.open()
     >>> root = connection.root()
...
@@ -533,15 +494,14 @@ def loadblob_tmpstore():

 def is_blob_record():
     r"""
-    >>> fs = FileStorage('Data.fs')
-    >>> bs = ZODB.blob.BlobStorage('blobs', fs)
+    >>> bs = create_storage()
     >>> db = DB(bs)
     >>> conn = db.open()
     >>> conn.root()['blob'] = ZODB.blob.Blob()
     >>> transaction.commit()
-    >>> ZODB.blob.is_blob_record(fs.load(ZODB.utils.p64(0), '')[0])
+    >>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(0), '')[0])
     False
-    >>> ZODB.blob.is_blob_record(fs.load(ZODB.utils.p64(1), '')[0])
+    >>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(1), '')[0])
     True

     An invalid pickle yields a false value:
...
@@ -558,8 +518,7 @@ def is_blob_record():

 def do_not_depend_on_cwd():
     """
-    >>> from ZODB.MappingStorage import MappingStorage
-    >>> bs = ZODB.blob.BlobStorage('blobs', MappingStorage())
+    >>> bs = create_storage()
     >>> here = os.getcwd()
     >>> os.mkdir('evil')
     >>> os.chdir('evil')
...
@@ -578,12 +537,67 @@ def setUp(test):
     ZODB.tests.util.setUp(test)
     test.globs['rmtree'] = zope.testing.setupstack.rmtree

-def setUpBlobAdaptedFileStorage(test):
-    setUp(test)
-
-    def create_storage(name='data', blob_dir=None):
-        if blob_dir is None:
-            blob_dir = '%s.bobs' % name
-        return ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
-
-    test.globs['create_storage'] = create_storage
-
+def storage_reusable_suite(prefix, factory):
+    """Return a test suite for a generic IBlobStorage.
+
+    Pass a factory taking a name and a blob directory name.
+    """
+    def setup(test):
+        setUp(test)
+        def create_storage(name='data', blob_dir=None):
+            if blob_dir is None:
+                blob_dir = '%s.bobs' % name
+            return factory(name, blob_dir)
+        test.globs['create_storage'] = create_storage
+
+    suite = unittest.TestSuite()
+    suite.addTest(doctest.DocFileSuite(
+        "blob_connection.txt", "blob_importexport.txt",
+        "blob_transaction.txt",
+        setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+        optionflags=doctest.ELLIPSIS,
+        ))
+    suite.addTest(doctest.DocTestSuite(
+        setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+        checker = zope.testing.renormalizing.RENormalizing([
+            (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
+            (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
+            ]),
+        ))
+
+    def create_storage(self, name='data', blob_dir=None):
+        if blob_dir is None:
+            blob_dir = '%s.bobs' % name
+        return factory(name, blob_dir)
+
+    for class_ in (BlobCloneTests, BlobUndoTests, RecoveryBlobStorage):
+        new_class = class_.__class__(
+            prefix+class_.__name__, (class_, ),
+            dict(create_storage=create_storage),
+            )
+        suite.addTest(unittest.makeSuite(new_class))
+
+    return suite

 def test_suite():
     suite = unittest.TestSuite()
     suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
     suite.addTest(doctest.DocFileSuite(
-        "blob_basic.txt", "blob_connection.txt", "blob_transaction.txt",
-        "blob_packing.txt", "blob_importexport.txt", "blob_consume.txt",
+        "blob_basic.txt",
+        "blob_packing.txt", "blob_consume.txt",
         "blob_tempdir.txt",
         setUp=setUp,
         tearDown=zope.testing.setupstack.tearDown,
...
@@ -600,17 +614,11 @@ def test_suite():
            (re.compile(r'\S+/((old|bushy|lawn)/\S+/foo[23456]?)'), r'\1'),
            ]),
        ))
-    suite.addTest(doctest.DocTestSuite(
-        setUp=setUp,
-        tearDown=zope.testing.setupstack.tearDown,
-        checker = zope.testing.renormalizing.RENormalizing([
-            (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
-            (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
-            ]),
-        ))
-    suite.addTest(unittest.makeSuite(BlobCloneTests))
-    suite.addTest(unittest.makeSuite(BlobUndoTests))
-    suite.addTest(unittest.makeSuite(RecoveryBlobStorage))
+    suite.addTest(storage_reusable_suite(
+        'BlobAdaptedFileStorage',
+        lambda name, blob_dir:
+        ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
+        ))
     return suite
...
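The new storage_reusable_suite helper is the point of the refactoring: another IBlobStorage implementation can run the shared doctests and test classes simply by handing it a factory, as test_suite() now does for BlobStorage over FileStorage. A sketch of such reuse from some other test module (MyBlobStorage is a placeholder, not a real class):

# Hypothetical reuse of storage_reusable_suite from another test module.
# MyBlobStorage is a placeholder for whatever IBlobStorage implementation is
# under test; for the undo tests it must also be an undoing storage.
import unittest
from ZODB.tests.testblob import storage_reusable_suite

def make_storage(name, blob_dir):
    return MyBlobStorage(name, blob_dir=blob_dir)   # placeholder factory

def test_suite():
    suite = unittest.TestSuite()
    suite.addTest(storage_reusable_suite('MyBlob', make_storage))
    return suite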