Commit 451d1e3f authored by Kirill Smelkov's avatar Kirill Smelkov

X Fixes so that it compiles from scratch; tweaks to weakref

Still trying to see what is going on
parent 2de18e8e
......@@ -34,7 +34,8 @@ ifeq ($(CC),)
$(error "Cannot determine py-CC")
endif
all : bigfile/_bigfile.so wcfs/wcfs
#all : bigfile/_bigfile.so wcfs/wcfs
all : bigfile/_bigfile.so
ccan_config := 3rdparty/ccan/config.h
......
......@@ -30,9 +30,20 @@ It provides wcfs integration for ZBigFile handles opened with _use_wcfs=True.
from __future__ import print_function, absolute_import
cdef extern from "wcfs/client/wcfs.h":
cdef extern from *:
"""
// XXX hack: C++ does not have __builtin_types_compatible_p, but CCAN configure
// thinks it does because CCAN is configured via C, not C++.
#include <config.h>
#undef HAVE_BUILTIN_TYPES_COMPATIBLE_P
#define HAVE_BUILTIN_TYPES_COMPATIBLE_P 0
#include <ccan/array_size/array_size.h>
"""
pass
#cdef extern from "wcfs/client/wcfs.h":
# pass
cdef extern from "bigfile/_bigfile.h":
struct PyBigFile:
pass
......@@ -46,12 +57,13 @@ cdef extern from "<wendelin/bigfile/file.h>" nogil:
cdef extern from * nogil:
"""
extern const bigfile_ops ZBigFile_mmap_ops;
"""
const bigfile_ops ZBigFile_mmap_ops
import wcfs as pywcfs
from wendelin.lib import zodb as pyzodb
from wcfs.client cimport _wcfs as wcfs
#from wcfs.client cimport _wcfs as wcfs
from golang cimport error, nil, pyerror
from cpython cimport PyCapsule_New
......@@ -73,7 +85,7 @@ import weakref
# - provides blkmmapper with WCFS integration.
cdef public class _ZBigFile(BigFile) [object _ZBigFile, type _ZBigFile_Type]:
cdef object zself # reference to ZBigFile
cdef wcfs.FileH wfileh # WCFS file handle. Initially nil, opened by blkmmapper
# cdef wcfs.FileH wfileh # WCFS file handle. Initially nil, opened by blkmmapper
# _new creates new _ZBigFile associated with ZBigFile zself.
# XXX Cython does not allow __new__ nor to change arguments passed to __cinit__ / __init__
......@@ -81,14 +93,14 @@ cdef public class _ZBigFile(BigFile) [object _ZBigFile, type _ZBigFile_Type]:
def _new(zself, blksize):
cdef _ZBigFile obj = _ZBigFile.__new__(_ZBigFile, blksize)
obj.zself = zself
obj.wfileh = nil
# obj.wfileh = nil
return obj
def __dealloc__(_ZBigFile zf):
cdef error err = nil
if zf.wfileh != nil:
err = zf.wfileh.close()
zf.wfileh = nil
# if zf.wfileh != nil:
# err = zf.wfileh.close()
# zf.wfileh = nil
if err != nil:
raise pyerror.from_error(err)
......@@ -107,9 +119,11 @@ cdef public class _ZBigFile(BigFile) [object _ZBigFile, type _ZBigFile_Type]:
# corresponding to ZBigFile is opened if use_wcfs=True.
def fileh_open(_ZBigFile zf, bint use_wcfs):
mmap_overlay = False
cdef wcfs.PyFileH pywfileh
# cdef wcfs.PyFileH pywfileh
if use_wcfs:
1/0
"""
mmap_overlay = True
if zf.wfileh == nil:
zconn = zf.zself._p_jar
......@@ -119,10 +133,12 @@ cdef public class _ZBigFile(BigFile) [object _ZBigFile, type _ZBigFile_Type]:
pywconn = pywconnOf(zconn)
pywfileh = pywconn.open(zf.zself._p_oid)
zf.wfileh = pywfileh.wfileh
"""
return super(_ZBigFile, zf).fileh_open(mmap_overlay)
"""
# pywconnOf establishes and returns (py) wcfs.Conn associated with zconn.
#
# returned wcfs.Conn will be maintained to keep in sync with zconn, and will be
......@@ -181,3 +197,4 @@ class ZSync:
def on_connection_resync(zsync):
zconn = zsync.zconn_ref()
zsync.wconn.resync(zconn_at(zconn))
"""
......@@ -20,13 +20,21 @@
// File file_zodb.cpp provides blkmmapper functions for _ZBigFile.
// MMapping is implemented via wcfs client.
#include "wcfs/client/wcfs.h"
// XXX hack: C++ does not have __builtin_types_compatible_p, but CCAN configure
// thinks it does because CCAN is configured via C, not C++.
#include <config.h>
#undef HAVE_BUILTIN_TYPES_COMPATIBLE_P
#define HAVE_BUILTIN_TYPES_COMPATIBLE_P 0
#include <ccan/array_size/array_size.h>
//#include "wcfs/client/wcfs.h"
#include "wendelin/bigfile/file.h"
#include "wendelin/bigfile/virtmem.h"
#include "bigfile/_bigfile.h"
#include "bigfile/_file_zodb.h"
#include <ccan/container_of/container_of.h>
/*
static int zfile_mmap_setup_read(VMA *vma, BigFile *file, blk_t blk, size_t blklen) {
_ZBigFile* _zfile = container_of(file, _ZBigFile, __pyx_base.file);
......@@ -80,6 +88,7 @@ static int zfile_munmap(VMA *vma, BigFile *file) {
return 0;
}
*/
// NOTE reusing whole bigfile_ops for just .mmap* ops.
......@@ -87,9 +96,9 @@ extern const bigfile_ops ZBigFile_mmap_ops;
static bigfile_ops _mkZBigFile_mmap_ops() {
// workaround for "sorry, unimplemented: non-trivial designated initializers not supported"
bigfile_ops _;
_.mmap_setup_read = zfile_mmap_setup_read;
_.remmap_blk_read = zfile_remmap_blk_read;
_.munmap = zfile_munmap;
_.mmap_setup_read = NULL; // zfile_mmap_setup_read;
_.remmap_blk_read = NULL; // zfile_remmap_blk_read;
_.munmap = NULL; //zfile_munmap;
_.loadblk = NULL;
_.storeblk = NULL;
return _;
......
......@@ -290,11 +290,13 @@ setup(
define_macros = [('_GNU_SOURCE',None), ('BUILDING_LIBVIRTMEM',None)],
language = 'c'),
DSO('wendelin.wcfs.client.libwcfs',
['wcfs/client/wcfs.cpp',
'wcfs/client/wcfs_watchlink.cpp',
'wcfs/client/wcfs_misc.cpp'],
dsos = ['wendelin.bigfile.libvirtmem'])],
#DSO('wendelin.wcfs.client.libwcfs',
# ['wcfs/client/wcfs.cpp',
# 'wcfs/client/wcfs_watchlink.cpp',
# 'wcfs/client/wcfs_misc.cpp'],
# dsos = ['wendelin.bigfile.libvirtmem'])
],
ext_modules = [
PyGoExt('wendelin.bigfile._bigfile',
......@@ -306,20 +308,21 @@ setup(
PyGoExt('wendelin.bigfile._file_zodb',
['bigfile/_file_zodb.pyx',
'bigfile/file_zodb.cpp'],
dsos = ['wendelin.wcfs.client.libwcfs']),
dsos = []),
#dsos = ['wendelin.wcfs.client.libwcfs']),
PyGoExt('wendelin.wcfs.client._wcfs',
['wcfs/client/_wcfs.pyx'],
dsos = ['wendelin.wcfs.client.libwcfs']),
#PyGoExt('wendelin.wcfs.client._wcfs',
# ['wcfs/client/_wcfs.pyx'],
# dsos = ['wendelin.wcfs.client.libwcfs']),
PyGoExt('wendelin.wcfs.internal.wcfs_test',
['wcfs/internal/wcfs_test.pyx']),
#PyGoExt('wendelin.wcfs.internal.wcfs_test',
# ['wcfs/internal/wcfs_test.pyx']),
Extension('wendelin.wcfs.internal.io',
['wcfs/internal/io.pyx']),
#Extension('wendelin.wcfs.internal.io',
# ['wcfs/internal/io.pyx']),
Extension('wendelin.wcfs.internal.mm',
['wcfs/internal/mm.pyx']),
#Extension('wendelin.wcfs.internal.mm',
# ['wcfs/internal/mm.pyx']),
],
package_dir = {'wendelin': ''},
......
......@@ -5,6 +5,7 @@ export GOTRACEBACK=crash
ulimit -c unlimited
CGO_ENABLED=0 go test -mod=vendor -c
#CGO_ENABLED=1 go test -mod=vendor -c -race
#export DBTail_SEED=1602769537289632682
cwd=$(pwd)
......@@ -19,6 +20,9 @@ function runtest1() {
#GOGC=0 $cwd/wcfs.test -test.v -test.count=100 -test.run 'TestZBlk|TestΔBTail' || break
#GOGC=0 $cwd/wcfs.test -test.v -test.count=100 -test.run 'TestZBlk' || break
GOGC=0 $cwd/wcfs.test -test.v -test.count=100 -test.run 'TestΔBTail' || break
#GOGC=0 GODEBUG=gccheckmark=1 $cwd/wcfs.test -test.v -test.count=100 -test.run 'TestΔBTail' || break
#GOGC=0 GODEBUG=gcstoptheworld=2 $cwd/wcfs.test -test.v -test.count=100 -test.run 'TestΔBTail' || break
#XXX asyncpreemptoff=1
done
}
......
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
# it under the terms of the GNU General Public License version 3, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# You can also Link and Combine this program with other software covered by
# the terms of any of the Free Software licenses or any of the Open Source
# Initiative approved licenses and Convey the resulting work. Corresponding
# source of such a combination shall include the source code for all other
# software used.
#
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
from __future__ import print_function, absolute_import
from wendelin.wcfs.internal import xbtree
from BTrees.LOBTree import LOBTree
from BTrees.IIBTree import IITreeSet, IISet
from BTrees.tests import testBTrees
from BTrees import check as zbcheck
from BTrees.check import BTREE_EMPTY, BTREE_ONE, BTREE_NORMAL
from ZODB.MappingStorage import MappingStorage
from ZODB import DB
from persistent import Persistent
import transaction
from pytest import raises
inf = float('inf')
# T/B are shorthands for Tree and Bucket without values.
# Bv is shorthand for Bucket with values.
T = xbtree.Tree
B = lambda *keyv: xbtree.Bucket(keyv, None)
Bv = lambda keyv, *valuev: xbtree.Bucket(keyv, valuev)
# buildDegenerateZTree builds ztree with known degenerate topology, see:
# https://github.com/zopefoundation/ZODB/commit/6cd24e99f89b
# https://github.com/zopefoundation/BTrees/blob/4.7.2-1-g078ba60/BTrees/tests/testBTrees.py#L20-L57
def buildDegenerateZTree():
    # reuse the reference degenerate-tree builder from BTrees' own test suite
    ztree, keys = testBTrees.DegenerateBTree("testBasicOps")._build_degenerate_tree()
    assert keys == [1, 3, 5, 7, 11]
    # expected keys-only topology of the built tree
    topoOK = T([4],
               T([2],
                 T([], B(1)), T([], B(3))),
               T([],
                 T([6, 10],
                   T([], T([], B(5))),
                   T([], B(7)),
                   T([], B(11))) ))
    assert xbtree.StructureOf(ztree) == topoOK
    return ztree
def test_structureOf():
    # StructureOf on an empty tree.
    t = LOBTree()
    assert xbtree.StructureOf(t) == T([], Bv([]))

    # StructureOf on a tree holding a single k->v.
    t = LOBTree()
    t[10] = 'hello'
    assert xbtree.StructureOf(t) == T([], Bv([10], 'hello'))

    # StructureOf on the known degenerate topology.
    t = buildDegenerateZTree()
    topoOK = T([4],
               T([2],
                 T([], B(1)), T([], B(3))),
               T([],
                 T([6, 10],
                   T([], T([], B(5))),
                   T([], B(7)),
                   T([], B(11))) ))
    assert xbtree.StructureOf(t) == topoOK
def test_topoEncoding():
    # X asserts that TopoDecode inverts TopoEncode for tree and returns the encoding.
    def X(tree):
        topo = xbtree.TopoEncode(tree)
        assert xbtree.TopoDecode(topo) == tree
        return topo

    assert X(T([], B()))        == 'T/B'
    assert X(T([], B(1)))       == 'T/B1'
    assert X(T([], B(1,3)))     == 'T/B1,3'
    assert X(T([], T([], B()))) == 'T/T/B'
    assert X(T([3],
               T([], B(1)),
               T([],
                 T([5], B(), B(7,8,9))))) == "T3/T-T/B1-T5/B-B7,8,9"

    # degenerate btree from ZODB
    assert X(T([4],
               T([2],
                 T([], B(1)), T([], B(3))),
               T([],
                 T([6, 10],
                   T([], T([], B(5))),
                   T([], B(7)),
                   T([], B(11))) ))) == "T4/T2-T/T-T-T6,10/B1-B3-T-T-T/T-B7-B11/B5"

    # tree with key->value
    assert X(T([], Bv([])))         == 'T/B:'
    assert X(T([], Bv([1], 4)))     == 'T/B1:4'
    assert X(T([], Bv([1,2], 4,5))) == 'T/B1:4,2:5'
    assert X(T([3],
               Bv([1], 10),
               Bv([4,5], 11,12)))   == 'T3/B1:10-B4:11,5:12'

    # TopoEncode/TopoDecode round-trip on autogenerated topologies.
    for tree in xbtree.AllStructs([1,3,7,8], 1,2, allowEmptyBuckets=True):
        assert xbtree.TopoDecode(xbtree.TopoEncode(tree)) == tree
def test_allStructs():
# X = AllStructs(..., allowEmptyBuckets=True)
# Y = AllStructs(..., allowEmptyBuckets=False)
# XY = X = Y + assert X == Y
def X(keys, maxdepth, maxsplit, allowEmptyBuckets=True, kv=None):
return list(xbtree.AllStructs(keys, maxdepth, maxsplit, allowEmptyBuckets, kv))
def Y(keys, maxdepth, maxsplit, kv=None):
return X(keys, maxdepth, maxsplit, allowEmptyBuckets=False, kv=kv)
def XY(keys, maxdepth, maxsplit, kv=None):
x = X(keys, maxdepth, maxsplit, kv=kv)
y = Y(keys, maxdepth, maxsplit, kv=kv)
assert x == y
return x
assert XY([], 0, 0) == [ T([], B()) ]
assert XY([1], 0, 0) == [ T([], B(1)) ]
assert XY([1,3], 0, 0) == [ T([], B(1,3)) ]
assert XY([], 0, 1) == [ T([], B()) ] # nothing to split
assert X([], 1, 0) == [ T([], B()),
T([],
T([], B())) ]
assert Y([], 1, 0) == [ T([], B()) ]
assert X([], 2, 0) == [ T([], B()),
T([],
T([], B())),
T([],
T([],
T([], B()))) ]
assert Y([], 2, 0) == [ T([], B()) ]
assert XY([1,3], 0, 0) == [ T([], B(1,3)) ]
assert X([1,3], 0, 1) == [ T([], B(1,3)),
# nsplit=1
T([0], B(), B(1,3)),
T([1], B(), B(1,3)),
T([2], B(1), B(3)),
T([3], B(1), B(3)),
T([4], B(1,3), B()),
]
assert Y([1,3], 0, 1) == [ T([], B(1,3)),
# nsplit=1
T([2], B(1), B(3)),
T([3], B(1), B(3)),
]
assert XY([1,3], 1, 0) == [ T([], B(1,3)),
# depth=1
T([],
T([], B(1,3)))
]
assert X([1,3], 1, 1) == [
# T/
T([], B(1,3)), # nsplit=0
T([], # nsplit=0,0
T([], B(1,3))),
T([], # nsplit=0,1
T([0], B(), B(1,3))),
T([],
T([1], B(), B(1,3))),
T([],
T([2], B(1), B(3))),
T([],
T([3], B(1), B(3))),
T([],
T([4], B(1,3), B())),
# T0/
T([0], B(), B(1,3)), # nsplit=1
T([0], # nsplit=1,(0,0)
T([], B()),
T([], B(1,3))),
T([0], # nsplit=1,(0,1)
T([], B()),
T([1], B(), B(1,3))),
T([0],
T([], B()),
T([2], B(1), B(3))),
T([0],
T([], B()),
T([3], B(1), B(3))),
T([0],
T([], B()),
T([4], B(1,3), B())),
# nsplit=1,(1,*) -> ø
# T1/
T([1], B(), B(1,3)), # nsplit=1
T([1], # nsplit=1,(0,0)
T([], B()),
T([], B(1,3))),
T([1],
T([], B()), # nsplit=1,(0,1)
T([2], B(1), B(3))),
T([1],
T([], B()),
T([3], B(1), B(3))),
T([1],
T([], B()),
T([4], B(1,3), B())),
T([1], # nsplit=1,(1,0)
T([0], B(), B()),
T([], B(1,3))),
T([1], # nsplit=1,(1,1)
T([0], B(), B()),
T([2], B(1), B(3))),
T([1],
T([0], B(), B()),
T([3], B(1), B(3))),
T([1],
T([0], B(), B()),
T([4], B(1,3), B())),
# T2/
T([2], B(1), B(3)), # nsplit=1
T([2], # nsplit=1,(0,0)
T([], B(1)),
T([], B(3))),
T([2], # nsplit=1,(0,1)
T([], B(1)),
T([3], B(), B(3))),
T([2],
T([], B(1)),
T([4], B(3), B())),
T([2], # nsplit=1,(1,0)
T([0], B(), B(1)),
T([], B(3))),
T([2], # nsplit=1,(1,1)
T([0], B(), B(1)),
T([3], B(), B(3))),
T([2],
T([0], B(), B(1)),
T([4], B(3), B())),
T([2], # nsplit=1,(1,0)
T([1], B(), B(1)),
T([], B(3))),
T([2], # nsplit=1,(1,1)
T([1], B(), B(1)),
T([3], B(), B(3))),
T([2],
T([1], B(), B(1)),
T([4], B(3), B())),
# T3/
T([3], B(1), B(3)), # nsplit=1
T([3], # nsplit=1,(0,0)
T([], B(1)),
T([], B(3))),
T([3], # nsplit=1,(0,1)
T([], B(1)),
T([4], B(3), B())),
T([3], # nsplit=1,(1,0)
T([0], B(), B(1)),
T([], B(3))),
T([3], # nsplit=1,(1,1)
T([0], B(), B(1)),
T([4], B(3), B())),
T([3], # nsplit=1,(1,0)
T([1], B(), B(1)),
T([], B(3))),
T([3], # nsplit=1,(1,1)
T([1], B(), B(1)),
T([4], B(3), B())),
T([3], # nsplit=1,(1,0)
T([2], B(1), B()),
T([], B(3))),
T([3], # nsplit=1,(1,1)
T([2], B(1), B()),
T([4], B(3), B())),
# T4/
T([4], B(1,3), B()), # nsplit=1
T([4], # nsplit=1,(0,0)
T([], B(1,3)),
T([], B())),
# nsplit=1,(0,1) -> ø
T([4], # nsplit=1,(1,0)
T([0], B(), B(1,3)),
T([], B())),
T([4],
T([1], B(), B(1,3)),
T([], B())),
T([4],
T([2], B(1), B(3)),
T([], B())),
T([4],
T([3], B(1), B(3)),
T([], B())),
# nsplit=1,(1,1) -> ø
]
assert Y([1,3], 1, 1) == [
# T/
T([], B(1,3)), # nsplit=0
T([], # nsplit=0,0
T([], B(1,3))),
T([], # nsplit=0,1
T([2], B(1), B(3))),
T([],
T([3], B(1), B(3))),
# T0/
# nothing - leftmost bucket is always empty
# T1/
# nothing - leftmost bucket is always empty
# T2/
T([2], B(1), B(3)), # nsplit=1
T([2], # nsplit=1,(0,0)
T([], B(1)),
T([], B(3))),
# T3/
T([3], B(1), B(3)), # nsplit=1
T([3], # nsplit=1,(0,0)
T([], B(1)),
T([], B(3))),
# T4/
# nothing - rightmost bucket is always empty
]
# TODO test for maxsplit=2 / maxdepth=2 vvv
def TY(keys, maxdepth, maxsplit, kv=None):
yv = Y(keys, maxdepth, maxsplit, kv=kv)
return list([xbtree.TopoEncode(_, vencode=lambda v: v) for _ in yv])
assert TY([1,3], 1, 1) == [
'T/B1,3',
'T/T/B1,3',
'T/T2/B1-B3',
'T/T3/B1-B3',
'T2/B1-B3',
'T2/T-T/B1-B3',
'T3/B1-B3',
'T3/T-T/B1-B3',
]
# with values
assert TY([1,3], 1,1, kv={1:'a',3:'c'}) == [
'T/B1:a,3:c',
'T/T/B1:a,3:c',
'T/T2/B1:a-B3:c',
'T/T3/B1:a-B3:c',
'T2/B1:a-B3:c',
'T2/T-T/B1:a-B3:c',
'T3/B1:a-B3:c',
'T3/T-T/B1:a-B3:c',
]
class XBlk(Persistent):
    """XBlk simulates ZBlk without xbtree_test.py depending on file_zodb.py."""

    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return 'X%s' % self.data

    __str__ = __repr__
class XLOTree(LOBTree):
    """XLOTree is like LOBTree but with small max tree and bucket node sizes.

    Its tree and bucket nodes are split often on regular tree updates.
    """
    #_bucket_type = XLOBucket
    max_leaf_size     = 2
    max_internal_size = 2

# register XLOTree with BTrees.check machinery so that crack_btree /
# crack_bucket can decompose its nodes.
zbcheck._type2kind[XLOTree]    = (zbcheck.TYPE_BTREE, True)
zbcheck._btree2bucket[XLOTree] = XLOTree._bucket_type
def crack_btree(ztree):
    """crack_btree exposes internal state of a ztree node via BTrees.check."""
    _ = xbtree._zclassify(ztree)
    assert _.is_ztree, ztree
    return zbcheck.crack_btree(ztree, is_mapping=True)
def crack_bucket(zbucket):
    """crack_bucket exposes internal state of a zbucket node via BTrees.check."""
    _ = xbtree._zclassify(zbucket)
    assert _.is_zbucket, zbucket
    return zbcheck.crack_bucket(zbucket, is_mapping=True)
# assertT asserts that znode is normal tree node + has specified keys and children.
#
# by default children are checked exactly via "is"
# if a child is represented as 'T' or 'B' - it is only verified to be of tree
# or bucket type correspondingly.
def assertT(znode, keyv, *children):  # -> [] of children marked with 'T'/'B'
    _ = xbtree._zclassify(znode)
    assert _.is_ztree
    kind, keys, kids = zbcheck.crack_btree(znode, _.is_map)
    assert kind == BTREE_NORMAL
    assert keys == keyv
    assert len(kids) == len(children)
    retv = []
    for kid, want in zip(kids, children):
        if want == 'T':
            # type-only check: child must be a tree node of the same type
            assert type(kid) is type(znode)
            retv.append(kid)
        elif want == 'B':
            # type-only check: child must be this tree's bucket type
            assert type(kid) is znode._bucket_type
            retv.append(kid)
        else:
            # exact identity check
            assert kid is want
    return retv
# assertB asserts that znode is bucket node with specified keys and values
def assertB(znode, *kvv):
    _ = xbtree._zclassify(znode)
    assert _.is_zbucket
    keys, values = zbcheck.crack_bucket(znode, _.is_map)
    if not _.is_map:
        # keys-only bucket: kvv is plain keys
        assert values == []
        assert keys == kvv
        return
    # mapping bucket: kvv is a sequence of (key, value) pairs;
    # values are compared by identity.
    assert len(keys) == len(values)
    assert len(keys) == len(kvv)
    for k, v, (kok, vok) in zip(keys, values, kvv):
        assert k == kok
        assert v is vok
def test_restructure():
# do restructure tests under ZODB because without ZODB connection it is not
# always possible to __setstate__ for e.g. .../T/B. We also want to make
# sure Restructure correctly marks modified nodes as changed so that the
# changes are actually persisted to storage on commit.
zstor = MappingStorage()
db = DB(zstor)
zconn = db.open()
X = [] # X[i] -> XBlk corresponding to block #i
xv = 'abcdefghijkl'
for i in range(len(xv)):
X.append(XBlk(xv[i]))
def xdecode(v):
assert len(v) == 1
assert v in xv
return X[xv.index(v)]
def xencode(x):
assert isinstance(x, XBlk)
return x.data
# assertB wraps global assertB to automatically fill in X[k] values for specified keys.
def assertB(znode, *keyv):
globals()['assertB'](znode, *[(k,X[k]) for k in keyv])
# Z prepares XLOTree ztree with given keys via usual way.
# the tree is setup as {} k -> X[k].
def Z(*keys):
ztree = XLOTree()
zconn.add(ztree)
for k in keys:
ztree[k] = X[k]
# check all keys via iterating (this verifies firstbucket and B->next pointers)
keys2 = set(ztree.keys())
assert keys2 == set(keys)
# check all keys by [] access
for k in keys:
assert ztree[k] is X[k]
return ztree
# R restructures ztree to have specified new topology.
# The result is committed unless dontcommit=Y specified.
def R(ztree, newtopo, dontcommit=False):
# verify ztree consistency
items = list(ztree.items())
for (k,v) in items:
assert ztree[k] == v
if isinstance(newtopo, str):
newStructure = xbtree.TopoDecode(newtopo, xdecode)
else:
assert isinstance(newtopo, xbtree.Tree)
newStructure = newtopo
xbtree.Restructure(ztree, newStructure)
if not dontcommit:
transaction.commit()
# force objects state to be reloaded from storage.
# this leads further checks to also verify if Restructure modified a
# node, but did not marked it as changed. If this bug is indeed there -
# then the modifications will be lost after live cache clearance.
zconn.cacheMinimize()
assert xbtree.StructureOf(ztree, onlyKeys=True) == \
newStructure.copy(onlyKeys=True)
# verify iteration produces the same [] of (key, v)
assert list(ztree.items()) == items
# verify [k] gives the same v (for all k)
for (k,v) in items:
assert ztree[k] == v
# S returns topo-encoded keys-only structure of ztree.
# Sv returns topo-encoded structure of ztree with values.
def S(ztree):
return xbtree.TopoEncode(xbtree.StructureOf(ztree, onlyKeys=True))
def Sv(ztree):
return xbtree.TopoEncode(xbtree.StructureOf(ztree), xencode)
# Z0 creates new empty tree
def Z0():
z = Z()
assert crack_btree(z) == (BTREE_EMPTY, [], [])
return z
# ---- tests with manual verification of resulting topology and nodes ----
# ø -> T/B
z = Z0()
R(z, 'T/B')
assert crack_btree(z) == (BTREE_EMPTY, [], [])
with raises(ValueError, match="new keys != old keys"):
R(z, 'T/B1')
# ø -> T/T/B (don't - we don't emit topologies with empty buckets for
# tests since ZODB breaks on them)
"""
z = Z0()
R(z, 'T/T/B')
t, = assertT(z, [], 'T')
b, = assertT(t, [], 'B')
assertB(b)
"""
# ø -> T/T-T/B-B (don't - see ^^^)
"""
z = Z0()
R(z, 'T0/T-T/B-B')
Tl, Tr = assertT(z, [0], 'T','T')
bl, = assertT(Tl, [], 'B')
br, = assertT(Tr, [], 'B')
assertB(bl)
assertB(br)
"""
# tree with 1 k->v (not yet committed bucket)
z = Z(1)
assert crack_btree(z) == (BTREE_ONE, ((1, X[1]),), None)
R(z, 'T/B1', dontcommit=True)
assert crack_btree(z) == (BTREE_ONE, ((1, X[1]),), None)
R(z, 'T/T/B1', dontcommit=True)
t, = assertT(z, [], 'T')
b1, = assertT(t, [], 'B')
assertB(b1, 1)
assert b1._p_oid is not None
R(z, 'T/B1', dontcommit=True)
assertT(z, [], b1)
assertB(b1, 1)
# tree with 2 k->v (not-yet committed bucket)
z = Z(1,3)
assert crack_btree(z) == (BTREE_ONE, ((1, X[1], 3, X[3]),), None)
R(z, 'T2/B1-B3', dontcommit=True)
b1, b3 = assertT(z, [2], 'B','B')
assert b1._p_oid is None
assert b3._p_oid is None
assertB(b1, 1)
assertB(b3, 3)
R(z, 'T/B1,3')
# buckets were not yet assigned oid -> collapsed back into T
assert crack_btree(z) == (BTREE_ONE, ((1, X[1], 3, X[3]),), None)
R(z, 'T3/B1-B3', dontcommit=True)
b1, b3 = assertT(z, [3], 'B','B')
assert b1._p_oid is None
assert b3._p_oid is None
assertB(b1, 1)
assertB(b3, 3)
transaction.commit() # force buckets to be assigned oid
assert b1._p_oid is not None
assert b3._p_oid is not None
# restructure back - buckets not collapsed back into T
R(z, 'T/B1,3')
b13, = assertT(z, [], 'B')
assertB(b13, 1,3)
# add 1 key -> B splits -> B + B
assert S(z) == 'T/B1,3'
z[5] = X[5]
assert S(z) == 'T3/B1-B3,5'
b1, b35 = assertT(z, [3], 'B','B')
assertB(b1, 1)
assertB(b35, 3,5)
# -> T2/T-T/B1-B3,5 (add intermediate T-T level)
R(z, 'T2/T-T/B1-B3,5')
tl, tr = assertT(z, [2], 'T','T')
assertT(tl, [], b1)
assertT(tr, [], b35)
assertB(b1, 1)
assertB(b35, 3,5)
# -> T2/T-T/B1-T/B3,5 (add intermediate T level in right arm)
R(z, 'T2/T-T/B1-T/B3,5')
assertT(z, [2], tl, tr)
assertT(tl, [], b1)
trr, = assertT(tr, [], 'T')
assert isinstance(trr, XLOTree)
assertT(trr, [], b35)
assertB(b1, 1)
assertB(b35, 3,5)
# -> T2,4/B1-B3-B5 (kill intermediate trees, split B35->B3+B5)
R(z, 'T2,4/B1-B3-B5')
b3, = assertT(z, [2,4], b1,'B',b35)
b5 = b35; del b35
assertB(b1, 1)
assertB(b3, 3)
assertB(b5, 5)
# -> T2/T-T4/B1-B3-B5 (add intermediate T-T4 level)
R(z, 'T2/T-T4/B1-B3-B5')
tl, tr = assertT(z, [2], 'T','T')
assertT(tl, [], b1)
assertT(tr, [4], b3,b5)
assertB(b1, 1)
assertB(b3, 3)
assertB(b5, 5)
# -> T2/T-T/B1-T4/B3-B5 (add intermediate level in right arm)
R(z, 'T2/T-T/B1-T4/B3-B5')
tr, = assertT(z, [2], tl,'T')
assertT(tl, [], b1)
trr, = assertT(tr, [], 'T')
assertT(trr, [4], b3,b5)
assertB(b1, 1)
assertB(b3, 3)
assertB(b5, 5)
# -> T/B1,3,5 (collapse into T/B)
R(z, 'T/B1,3,5')
assertT(z, [], b1)
b135 = b1
assertB(b135, 1,3,5)
# grow the tree with four more keys (6,7,8,9) till top-level tree node splits
assert S(z) == 'T/B1,3,5'
z[6] = X[6]
assert S(z) == 'T5/B1,3-B5,6'
z[7] = X[7]
assert S(z) == 'T5,6/B1,3-B5-B6,7'
z[8] = X[8]
assert S(z) == 'T6/T5-T7/B1,3-B5-B6-B7,8'
# rotate keys in T and reflow B to the left
tl, tr = assertT(z, [6], 'T','T')
b13, b5 = assertT(tl, [5], 'B','B')
b6, b78 = assertT(tr, [7], 'B','B')
assertB(b13, 1,3)
assertB(b5, 5)
assertB(b6, 6)
assertB(b78, 7,8)
R(z, 'T7/T4,6-T/B1,3-B5-B6-B7,8')
assertT(z, [7], tl,tr)
assertT(tl, [4,6], b13,b5,b6)
assertT(tr, [], b78)
assertB(b13, 1,3)
assertB(b5, 5)
assertB(b6, 6)
assertB(b78, 7,8)
# migrate keys in between buckets
R(z, 'T6/T3-T8/B1-B3,5-B6,7-B8')
assertT(z, [6], tl,tr)
assertT(tl, [3], b13,b5)
assertT(tr, [8], b6,b78)
b1 = b13; del b13
b35 = b5; del b5
b67 = b6; del b6
b8 = b78; del b78
assertB(b1, 1)
assertB(b35, 3,5)
assertB(b67, 6,7)
assertB(b8, 8)
# ---- new structure given with values ----
z = Z(0,2)
R(z, T([1], Bv([0],X[0]), Bv([2],X[2])))
b0, b2 = assertT(z, [1], 'B','B')
assertB(b0, 0)
assertB(b2, 2)
assert b0[0] is X[0]
assert b2[2] is X[2]
# [2] changes value from X[2] to X[3]
with raises(ValueError, match=r"target bucket changes \[2\]"):
R(z, T([1], Bv([0],X[0]), Bv([2],X[3])))
# ---- tricky cases
z = Z(0,1,2,3)
R(z, 'T2/T1-T3/B0-B1-T-T/B2-B3')
R(z, 'T2/T1-T/T-T-B2,3/B0-B1')
# degenerate topology from ZODB example
z = Z(1,3,5,7,11)
R(z, 'T4/T2-T/T-T-T6,10/B1-B3-T-T-T/T-B7-B11/B5')
R(z, 'T/B1,3,5,7,11')
# verify that changed objects are marked as such and so included into commit
# (just R also partly verifies this on every call)
z = Z(0,2,3)
transaction.commit()
def Rz(newtopo):
R(z, newtopo, dontcommit=True)
transaction.commit()
assert Sv(z) == newtopo
zconn.cacheMinimize() # force z state to be reloaded from storage
assert Sv(z) == newtopo # will fail if T or B is not marked as changed
xbtree.zcheck(z)
Rz('T/B0:a,2:c,3:d')
Rz('T1/B0:a-B2:c,3:d')
Rz('T2/B0:a-B2:c,3:d')
Rz('T3/B0:a,2:c-B3:d')
Rz('T2/T-T3/B0:a-B2:c-B3:d')
# make sure that only modified nodes are marked as changed.
z = Z(0,1,2,3)
R(z, 'T1/T-T2/B0-B1-B2,3')
tl, tr = assertT(z, [1], 'T','T')
b0, = assertT(tl, [], 'B')
b1, b23 = assertT(tr, [2], 'B','B')
assertB(b0, 0)
assertB(b1, 1)
assertB(b23, 2,3)
assert z._p_changed == False
assert tl._p_changed == False
assert tr._p_changed == False
assert b0._p_changed == False
assert b1._p_changed == False
assert b23._p_changed == False
R(z, 'T1/T-T3/B0-B1,2-B3', dontcommit=True) # reflow right arm
assertT(z, [1], tl, tr)
assertT(tl, [], b0)
assertT(tr, [3], b1, b23) # changed
assertB(b0, 0)
assertB(b1, 1,2) # changed
assertB(b23, 3) # changed
assert z._p_changed == False
assert tl._p_changed == False
assert tr._p_changed == True
assert b0._p_changed == False
assert b1._p_changed == True
assert b23._p_changed == True
transaction.commit()
# ---- tests on automatically generated topologies ----
#
# ( we make sure that Restructure can make the restructurement and that
# after restructure a tree remains valid without any error introduced )
for nkeys in range(5): # XXX !slow -> ↑
for xkeyv in xbtree._iterSplitByN(-1, 5+1, nkeys):
keyv = xkeyv[1:-1] # -1, ..., N -> ...
#print(keyv)
z = Z(*keyv)
# d s Nvariants Ttest
# 3 2 35·10³ 40s
# 3 1 18·10³ 20s
# 2 2 8·10³ 8s
# 2 1 3·103 4s
# 1 1 1·10³ 1s
for tree in xbtree.AllStructs(keyv, 2, 1): # XXX !slow -> d=3, s=2
#print('\t%s' % xbtree.TopoEncode(tree))
R(z, tree)
def test_walkBFS():
    R = xbtree._Range

    # T/B
    b = B()
    t = T([], b)
    levels = list(xbtree.__walkBFS(t))
    assert len(levels) == 2             # (t) (b)
    row = levels[0]
    assert len(row) == 1
    assert row[0].range == R(-inf, inf)
    assert row[0].node is t
    row = levels[1]
    assert len(row) == 1
    assert row[0].range == R(-inf, inf)
    assert row[0].node is b

    # T0/T-T/B-B
    bl = B(); br = B()
    tl = T([], bl)
    tr = T([], br)
    t = T([0], tl, tr)
    levels = list(xbtree.__walkBFS(t))
    assert len(levels) == 3             # (t) (tl, tr) (bl, br)
    row = levels[0]
    assert len(row) == 1
    assert row[0].range == R(-inf, inf)
    assert row[0].node is t
    row = levels[1]
    assert len(row) == 2
    assert row[0].range == R(-inf, 0)
    assert row[0].node is tl
    assert row[1].range == R(0, inf)
    assert row[1].node is tr
    row = levels[2]
    assert len(row) == 2
    assert row[0].range == R(-inf, 0)
    assert row[0].node is bl
    assert row[1].range == R(0, inf)
    assert row[1].node is br

    # XXX more tests?
def test_zwalkBFS():
    zt = buildDegenerateZTree()

    # assign oid to created objects to force btrees not to embed bucket state
    zstor = MappingStorage()
    db = DB(zstor)
    zconn = db.open()
    zroot = zconn.root()
    zroot['x'] = zt
    transaction.commit()

    # local assertT/assertB additionally verify node type, then delegate to
    # the module-level checkers.
    def assertT(znode, keyv, *children):
        assert isinstance(znode, IITreeSet)
        return globals()['assertT'](znode, keyv, *children)

    # assertB asserts that znode is bucket + has specified keys
    def assertB(znode, *keyv):
        assert isinstance(znode, IISet)
        globals()['assertB'](znode, *keyv)

    R = xbtree._Range
    levels = list(xbtree.__zwalkBFS(zt))
    assert len(levels) == 6  # [-∞,∞)T4,
                             # [-∞,4)T2, [4,∞)T
                             # [-∞,2)T, [2,4)T, [4,∞)T6,10
                             # [-∞,2)B1, [2,4)B3, [4,6)T, [6,10)T, [10,∞]T
                             # [4,6)T, [6,10)B7, [10,∞)B11
                             # [4,6)B5

    # verify bottom-up so that child nodes are known when checking parents.
    row = levels[5]          # [4,6)B5
    assert len(row) == 1
    assert row[0].range == R(4,6)
    b5 = row[0].node; assertB(b5, 5)

    row = levels[4]          # [4,6)T, [6,10)B7, [10,∞)B11
    assert len(row) == 3
    assert row[0].range == R(4,6)
    assert row[1].range == R(6,10)
    assert row[2].range == R(10,inf)
    t4_b5 = row[0].node; assertT(t4_b5, [], b5)
    b7    = row[1].node; assertB(b7, 7)
    b11   = row[2].node; assertB(b11, 11)

    row = levels[3]          # [-∞,2)B1, [2,4)B3, [4,6)T, [6,10)T, [10,∞]T
    assert len(row) == 5
    assert row[0].range == R(-inf,2)
    assert row[1].range == R(2,4)
    assert row[2].range == R(4,6)
    assert row[3].range == R(6,10)
    assert row[4].range == R(10,inf)
    b1       = row[0].node; assertB(b1, 1)
    b3       = row[1].node; assertB(b3, 3)
    t3_t4_b5 = row[2].node; assertT(t3_t4_b5, [], t4_b5)
    t3_b7    = row[3].node; assertT(t3_b7, [], b7)
    t3_b11   = row[4].node; assertT(t3_b11, [], b11)

    row = levels[2]          # [-∞,2)T, [2,4)T, [4,∞)T6,10
    assert len(row) == 3
    assert row[0].range == R(-inf,2)
    assert row[1].range == R(2,4)
    assert row[2].range == R(4,inf)
    t2_b1  = row[0].node; assertT(t2_b1, [], b1)
    t2_b3  = row[1].node; assertT(t2_b3, [], b3)
    t2_610 = row[2].node; assertT(t2_610, [6,10], t3_t4_b5, t3_b7, t3_b11)

    row = levels[1]          # [-∞,4)T2, [4,∞)T
    assert len(row) == 2
    assert row[0].range == R(-inf, 4)
    assert row[1].range == R(4, inf)
    t1_2      = row[0].node; assertT(t1_2, [2], t2_b1, t2_b3)
    t1_t2_610 = row[1].node; assertT(t1_t2_610, [], t2_610)

    row = levels[0]          # [-∞,∞)T4,
    assert len(row) == 1
    assert row[0].range == R(-inf, inf)
    assertT(row[0].node, [4], t1_2, t1_t2_610)
def test_keyvSliceBy():
    # _keyvSliceBy(keyv, lo, hi) -> keys of keyv that fall into [lo, hi)
    ks = xbtree._keyvSliceBy
    assert ks([],  0,0) == []
    assert ks([1], 0,0) == []
    assert ks([1], 0,1) == []
    assert ks([1], 1,1) == []
    assert ks([1], 1,2) == [1]
    assert ks([1,3,5,10,17], 3,10) == [3,5]
def test_iterSplitByN():
    # X collects all ways to split [lo, hi) range with nsplit points.
    def X(lo, hi, nsplit):
        return tuple(xbtree._iterSplitByN(lo, hi, nsplit))

    assert X(0,0, 0) == ( [0,0], )
    assert X(0,0, 1) == ()

    assert X(0,1, 0) == ( [0,1], )
    assert X(0,1, 1) == ()

    assert X(0,2, 0) == ( [0,2], )
    assert X(0,2, 1) == ( [0,1,2], )
    assert X(0,2, 2) == ()

    assert X(0,3, 0) == ( [0,3], )
    assert X(0,3, 1) == ( [0,1,3], [0,2,3] )
    assert X(0,3, 2) == ( [0,1,2,3], )
    assert X(0,3, 3) == ()

    assert X(0,4, 0) == ( [0,4], )
    assert X(0,4, 1) == ( [0,1,4], [0,2,4], [0,3,4] )
    assert X(0,4, 2) == ( [0,1,2,4], [0,1,3,4], [0,2,3,4] )
    assert X(0,4, 3) == ( [0,1,2,3,4], )
    assert X(0,4, 4) == ()

    assert X(0,5, 0) == ( [0,5], )
    assert X(0,5, 1) == ( [0,1,5], [0,2,5], [0,3,5], [0,4,5] )
    assert X(0,5, 2) == ( [0,1,2,5], [0,1,3,5], [0,1,4,5], [0,2,3,5], [0,2,4,5], [0,3,4,5] )
    assert X(0,5, 3) == ( [0,1,2,3,5], [0,1,2,4,5], [0,1,3,4,5], [0,2,3,4,5] )
    assert X(0,5, 4) == ( [0,1,2,3,4,5], )
    assert X(0,5, 5) == ()
def test_iterSplitKeyvByN():
    keyv = [1,3,4]

    # X collects all ways to split [lo, hi) with nsplit points taken so that
    # every key of keyv remains inside some resulting subrange.
    def X(lo, hi, nsplit):
        return tuple(xbtree._iterSplitKeyvByN(lo, hi, keyv, nsplit))

    assert X(0,7, 0) == ( [0,7], )
    assert X(0,7, 1) == ( [0,2,7], [0,3,7], [0,4,7] )
    assert X(0,7, 2) == ( [0,2,4,7], [0,3,4,7] )
    assert X(0,7, 3) == ()
......@@ -28,8 +28,10 @@ import (
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
"unsafe"
"math/rand"
)
// iface is how Go runtime represents an interface.
......@@ -78,8 +80,12 @@ func NewRef(obj interface{}) *Ref {
state: objLive,
}
var release func(interface{})
release = func(obj interface{}) {
runtime.SetFinalizer(obj, w.release)
runtime.KeepAlive(obj)
return w
}
func (w *Ref) release(obj interface{}) {
ifobj := *(*iface)(unsafe.Pointer(&obj))
if w.iface != ifobj {
panic(fmt.Sprintf("weak: release: w.iface != obj; w.iface=%x obj=%x", w.iface, ifobj))
......@@ -94,17 +100,17 @@ func NewRef(obj interface{}) *Ref {
w.mu.Lock()
if w.state == objGot {
w.state = objLive
runtime.SetFinalizer(obj, release)
runtime.SetFinalizer(obj, w.release)
} else {
w.state = objReleased
}
w.mu.Unlock()
}
runtime.SetFinalizer(obj, release)
return w
runtime.KeepAlive(obj)
}
var xxx uint64
// Get returns object pointed to by this weak reference.
//
// If original object is still alive - it is returned.
......@@ -114,11 +120,36 @@ func (w *Ref) Get() (obj interface{}) {
if w.state != objReleased {
w.state = objGot
time.Sleep(100*time.Nanosecond)
//time.Sleep(100*time.Nanosecond)
// recreate interface{} from saved words.
// XXX do writes as pointers so that compiler emits write barriers to notify GC?
i := (*iface)(unsafe.Pointer(&obj))
*i = w.iface
//*i = w.iface
r := rand.Int31n(2)
_ = r
//if r % 2 == 0 {
if true {
i.data = w.iface.data
} else {
i.typ = w.iface.typ
}
atomic.AddUint64(&xxx, 1) // barrier
//time.Sleep(100*time.Nanosecond)
time.Sleep(100*time.Nanosecond)
// go runtime.GC()
//time.Sleep(1000*time.Nanosecond)
// time.Sleep(100*time.Nanosecond)
// runtime.GC()
//if r % 2 == 0 {
if true {
i.typ = w.iface.typ
} else {
i.data = w.iface.data
}
}
w.mu.Unlock()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment