Commit 00ab5fb4 authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent 0b0d80dd
......@@ -27,6 +27,7 @@ import transaction
from transaction import TransactionManager
from ZODB.POSException import ConflictError
from numpy import ndarray, array_equal, uint32, zeros, arange
from golang import defer, func
from threading import Thread
from six.moves import _thread
from six import b
......@@ -349,12 +350,14 @@ def test_bigfile_filezodb():
# connection can migrate between threads handling requests.
# verify _ZBigFileH properly adjusts.
# ( NOTE this test is almost dupped at test_zbigarray_vs_conn_migration() )
@func
def test_bigfile_filezodb_vs_conn_migration():
root01 = dbopen()
conn01 = root01._p_jar
db = conn01.db()
conn01.close()
del root01
defer(db.close)
c12_1 = NotifyChannel() # T11 -> T21
c21_1 = NotifyChannel() # T21 -> T11
......@@ -545,17 +548,18 @@ def test_bigfile_filezodb_vs_conn_migration():
assert Blk(vma03, 0)[0] == 22
del vma03, fh03, f03
dbclose(root03)
# ZBlk should properly handle 'invalidate' messages from DB
# ( NOTE this test is almost dupped at test_zbigarray_vs_cache_invalidation() )
@func
def test_bigfile_filezodb_vs_cache_invalidation():
root = dbopen()
conn = root._p_jar
db = conn.db()
conn.close()
del root, conn
defer(db.close)
tm1 = TransactionManager()
tm2 = TransactionManager()
......@@ -601,6 +605,7 @@ def test_bigfile_filezodb_vs_cache_invalidation():
ram_reclaim_all()
assert Blk(vma2, 0)[0] == 1
# """
# FIXME: this simulates ZODB Connection cache pressure and currently
# removes ZBlk corresponding to blk #0 from conn2 cache.
# In turn this leads to conn2 missing that block invalidation on follow-up
......@@ -608,25 +613,24 @@ def test_bigfile_filezodb_vs_cache_invalidation():
#
# See FIXME notes on ZBlkBase._p_invalidate() for detailed description.
conn2._cache.minimize()
# """
tm2.commit() # transaction boundary for t2
# data from tm1 should propagate -> ZODB -> ram pages for _ZBigFileH in conn2
assert Blk(vma2, 0)[0] == 2
conn2.close()
del conn2, root2
dbclose(root1)
# verify that conflicts on ZBlk are handled properly
# ( NOTE this test is almost dupped at test_zbigarray_vs_conflicts() )
@func
def test_bigfile_filezodb_vs_conflicts():
root = dbopen()
conn = root._p_jar
db = conn.db()
conn.close()
del root, conn
defer(db.close)
tm1 = TransactionManager()
tm2 = TransactionManager()
......@@ -680,16 +684,15 @@ def test_bigfile_filezodb_vs_conflicts():
assert Blk(vma1, 0)[0] == 13 # re-read in conn1
conn2.close()
dbclose(root1)
# verify that fileh are garbage-collected after user free them
@func
def test_bigfile_filezodb_fileh_gc():
root1= dbopen()
conn1= root1._p_jar
db = conn1.db()
defer(db.close)
root1['zfile4'] = f1 = ZBigFile(blksize)
transaction.commit()
......@@ -713,12 +716,13 @@ def test_bigfile_filezodb_fileh_gc():
assert wfh1() is None # fh1 should be gone
del vma2, fh2, f2
dbclose(root2)
# verify how zblk format change works
@func
def test_bigfile_filezodb_fmt_change():
root = dbopen()
defer(lambda: dbclose(root))
root['zfile5'] = f = ZBigFile(blksize)
transaction.commit()
......@@ -751,8 +755,6 @@ def test_bigfile_filezodb_fmt_change():
finally:
file_zodb.ZBlk_fmt_write = fmt_write_save
dbclose(root)
# test that ZData are reused for changed chunks in ZBlk1 format
def test_bigfile_zblk1_zdata_reuse():
......@@ -764,8 +766,10 @@ def test_bigfile_zblk1_zdata_reuse():
finally:
file_zodb.ZBlk_fmt_write = fmt_write_save
@func
def _test_bigfile_zblk1_zdata_reuse():
root = dbopen()
defer(lambda: dbclose(root))
root['zfile6'] = f = ZBigFile(blksize)
transaction.commit()
......@@ -799,5 +803,3 @@ def _test_bigfile_zblk1_zdata_reuse():
assert len(zdata_v1) == len(zdata_v2)
for i in range(len(zdata_v1)):
assert zdata_v1[i] is zdata_v2[i]
dbclose(root)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment