nexedi / ZODB / Commits

Commit 2a1ae51f
authored Mar 15, 2001 by Jim Fulton
Merged changes from Catalog-BTrees-Integration branch.
parent 0b031bf1
Showing 21 changed files with 2394 additions and 434 deletions (+2394, -434)
Changed files:

    trunk/src/BTrees/BTreeItemsTemplate.c    +96   -69
    trunk/src/BTrees/BTreeModuleTemplate.c   +21   -6
    trunk/src/BTrees/BTreeTemplate.c         +290  -164
    trunk/src/BTrees/BucketTemplate.c        +251  -72
    trunk/src/BTrees/IOBTree.c               +1    -1
    trunk/src/BTrees/Interfaces.py           +96   -0
    trunk/src/BTrees/Length.py               +114  -0
    trunk/src/BTrees/MergeTemplate.c         +339  -0
    trunk/src/BTrees/SetOpTemplate.c         +31   -1
    trunk/src/BTrees/SetTemplate.c           +71   -54
    trunk/src/BTrees/TreeSetTemplate.c       +59   -1
    trunk/src/BTrees/convert.py              +99   -0
    trunk/src/BTrees/intkeymacros.h          +2    -1
    trunk/src/BTrees/intvaluemacros.h        +2    -1
    trunk/src/BTrees/tests/__init__.py       +2    -0
    trunk/src/BTrees/tests/testBTrees.py     +112  -43
    trunk/src/BTrees/tests/testConflict.py   +538  -0
    trunk/src/ZODB/BaseStorage.py            +4    -3
    trunk/src/ZODB/ConflictResolution.py     +201  -0
    trunk/src/ZODB/Connection.py             +48   -9
    trunk/src/ZODB/FileStorage.py            +17   -9
trunk/src/BTrees/BTreeItemsTemplate.c

...
@@ -237,7 +237,9 @@ BTreeItems_seek(BTreeItems *self, int i)
   /* Whew, we got here so we have a valid offset! */
-  while ((delta = i - pseudoindex) != 0)
+  delta = i - pseudoindex;
+  if (delta)
+    while (delta)
     {
       if (delta < 0)
         {
...
@@ -285,6 +287,8 @@ BTreeItems_seek(BTreeItems *self, int i)
         {
           while (1)
             {
+              if (currentbucket == self->lastbucket) goto no_match;
               if ((b = currentbucket->next) == NULL) goto no_match;
               delta -= currentbucket->len - currentoffset;
               pseudoindex += (currentbucket->len - currentoffset);
...
@@ -305,6 +309,15 @@ BTreeItems_seek(BTreeItems *self, int i)
                   currentoffset > self->last) goto no_match;
             }
+          delta = i - pseudoindex;
+        }
+      else
+        {
+          /* Sanity check current bucket/offset */
+          if (currentbucket == self->firstbucket
+              && currentoffset < self->first) goto no_match;
+          if (currentbucket == self->lastbucket
+              && currentoffset > self->last) goto no_match;
+        }
     }

   PER_ALLOW_DEACTIVATION(currentbucket);
...
@@ -472,14 +485,26 @@ newBTreeItems(char kind,
   UNLESS (self = PyObject_NEW(BTreeItems, &BTreeItemsType)) return NULL;
   self->kind = kind;
   self->first = lowoffset;
   self->last = highoffset;
-  Py_XINCREF(lowbucket);
+  if (! lowbucket || (lowbucket == highbucket && lowoffset > highoffset))
+    {
+      self->firstbucket   = 0;
+      self->lastbucket    = 0;
+      self->currentbucket = 0;
+    }
+  else
+    {
+      Py_INCREF(lowbucket);
       self->firstbucket = lowbucket;
       Py_XINCREF(highbucket);
       self->lastbucket = highbucket;
       Py_XINCREF(lowbucket);
       self->currentbucket = lowbucket;
+    }
   self->currentoffset = lowoffset;
   self->pseudoindex = 0;
...
@@ -525,6 +550,7 @@ nextBTreeItems(SetIteration *i)
           PyErr_Clear();
         }
     }
+  return 0;
 }

 static int
...
@@ -558,4 +584,5 @@ nextTreeSetItems(SetIteration *i)
           PyErr_Clear();
         }
     }
+  return 0;
 }
trunk/src/BTrees/BTreeModuleTemplate.c

...
@@ -85,7 +85,7 @@
 static char BTree_module_documentation[] =
 ""
-"\n$Id: BTreeModuleTemplate.c,v 1.4 2001/02/19 18:15:10 jim Exp $"
+"\n$Id: BTreeModuleTemplate.c,v 1.5 2001/03/15 13:16:22 jim Exp $"
 ;

 #ifdef PERSISTENT
...
@@ -113,7 +113,7 @@ static char BTree_module_documentation[] =
 #define PER_CHANGED(O) 0
 #endif

-static PyObject *sort_str, *reverse_str;
+static PyObject *sort_str, *reverse_str, *items_str, *__setstate___str;

 static void PyVar_Assign(PyObject **v, PyObject *e) { Py_XDECREF(*v); *v=e;}
 #define ASSIGN(V,E) PyVar_Assign(&(V),(E))
...
@@ -129,6 +129,9 @@ static void PyVar_Assign(PyObject **v, PyObject *e) { Py_XDECREF(*v); *v=e;}
 #define SameType_Check(O1, O2) ((O1)->ob_type==(O2)->ob_type)

+#define ASSERT(C, S, R) if (! (C)) { \
+  PyErr_SetString(PyExc_AssertionError, (S)); return (R); }
+
 typedef struct BTreeItemStruct {
   KEY_TYPE key;
   PyObject *value;
...
@@ -163,6 +166,9 @@ typedef struct {
   BTreeItem *data;
 } BTree;

+staticforward PyExtensionClass BTreeType;
+
 #define BTREE(O) ((BTree*)(O))

 typedef struct SetIteration_s
...
@@ -267,6 +273,8 @@ PyMalloc(size_t sz)
 {
   void *r;

+  ASSERT(sz > 0, "non-positive size malloc", NULL);
+
   if (r = malloc(sz)) return r;

   PyErr_NoMemory();
...
@@ -278,10 +286,14 @@ PyRealloc(void *p, size_t sz)
 {
   void *r;

-  if (r = realloc(p, sz)) return r;
+  ASSERT(sz > 0, "non-positive size realloc", NULL);

-  PyErr_NoMemory();
-  return NULL;
+  if (p) r = realloc(p, sz);
+  else   r = malloc(sz);
+
+  UNLESS (r) PyErr_NoMemory();
+
+  return r;
 }

 #include "BTreeItemsTemplate.c"
...
@@ -290,6 +302,7 @@ PyRealloc(void *p, size_t sz)
 #include "BTreeTemplate.c"
 #include "TreeSetTemplate.c"
 #include "SetOpTemplate.c"
+#include "MergeTemplate.c"

 static struct PyMethodDef module_methods[] = {
   {"difference", (PyCFunction) difference_m, METH_VARARGS,
...
@@ -324,6 +337,8 @@ INITMODULE ()
   UNLESS (sort_str = PyString_FromString("sort")) return;
   UNLESS (reverse_str = PyString_FromString("reverse")) return;
+  UNLESS (items_str = PyString_FromString("items")) return;
+  UNLESS (__setstate___str = PyString_FromString("__setstate__")) return;

   UNLESS (PyExtensionClassCAPI = PyCObject_Import("ExtensionClass", "CAPI"))
     return;
...
@@ -372,7 +387,7 @@ INITMODULE ()
   d = PyModule_GetDict(m);

   PyDict_SetItemString(d, "__version__",
-                       PyString_FromString("$Revision: 1.4 $"));
+                       PyString_FromString("$Revision: 1.5 $"));

   PyExtensionClass_Export(d, PREFIX "Bucket", BucketType);
   PyExtensionClass_Export(d, PREFIX "BTree", BTreeType);
...
trunk/src/BTrees/BTreeTemplate.c

...
@@ -114,9 +114,11 @@ _BTree_get(BTree *self, PyObject *keyarg, int has_key)
     }
   if (SameType_Check(self, self->data[min].value))
-    r = _BTree_get( BTREE(self->data[min].value), keyarg, has_key);
+    r = _BTree_get( BTREE(self->data[min].value), keyarg,
+                    has_key ? has_key + 1 : 0);
   else
-    r = _bucket_get(BUCKET(self->data[min].value), keyarg, has_key);
+    r = _bucket_get(BUCKET(self->data[min].value), keyarg,
+                    has_key ? has_key + 1 : 0);
   }
 else
   {  /* No data */
...
@@ -140,27 +142,23 @@ BTree_get(BTree *self, PyObject *key)
 }

 /*
-** BTree_split
-**
-** Splits a BTree at a given index
-**
-** Arguments: self  The original BTree
-**            index The index to split at (if out of bounds use midpoint)
-**            next  The BTree to split into
-**
-** Returns:    0 on success
-**            -1 on failure
+  Copy data from the current BTree to the newly created BTree, next.
+  Reset length to reflect the fact that we've given up some data.
 */
 static int
 BTree_split(BTree *self, int index, BTree *next)
 {
+  int next_size;
+
+  ASSERT(self->len > 1, "split of empty tree", -1);
+
   if (index < 0 || index >= self->len) index = self->len / 2;
-  UNLESS (next->data = PyMalloc(sizeof(BTreeItem) * (self->len - index)))
-    return -1;
-  next->len = self->len - index;
-  next->size = next->len;
-  memcpy(next->data, self->data + index, sizeof(BTreeItem) * next->size);
+
+  next_size = self->len - index;
+  ASSERT(next_size > 0, "split creates empty tree", -1);
+  UNLESS (next->data = PyMalloc(sizeof(BTreeItem) * next_size)) return -1;
+  memcpy(next->data, self->data + index, sizeof(BTreeItem) * next_size);
+  next->size = next->len = next_size;

   self->len = index;
...
@@ -168,28 +166,20 @@ BTree_split(BTree *self, int index, BTree *next)
     {
       PER_USE_OR_RETURN(BTREE(next->data->value), -1);
       next->firstbucket = BTREE(next->data->value)->firstbucket;
-      Py_INCREF(self->firstbucket);
+      Py_XINCREF(next->firstbucket);
       PER_ALLOW_DEACTIVATION(BTREE(next->data->value));
     }
   else
     {
       next->firstbucket = BUCKET(next->data->value);
-      Py_INCREF(next->firstbucket);
+      Py_XINCREF(next->firstbucket);
     }

   return 0;
 }

-/*
-** BTree_clone
-**
-** Split a BTree node into two children, leaving the original node the
-** parent.
-**
-** Arguments: self  The BTree
-**
-** Returns:    0 on success
-**            -1 on failure
+/* Split out data among two newly created BTrees, which become
+   out children.
 */
 static int
 BTree_clone(BTree *self)
...
@@ -217,7 +207,7 @@ BTree_clone(BTree *self)
   n1->len = self->len;
   n1->data = self->data;
   n1->firstbucket = self->firstbucket;
-  Py_INCREF(n1->firstbucket);
+  Py_XINCREF(n1->firstbucket);

   /* Initialize our data to hold split data */
   self->data = d;
...
@@ -225,7 +215,10 @@ BTree_clone(BTree *self)
   self->size = 2;
   self->data->value = OBJECT(n1);
   COPY_KEY(self->data[1].key, n2->data->key);
-  INCREF_KEY(self->data[1].key);
+
+  /* We take the unused reference from n2, so there's no reason to INCREF! */
+  /* INCREF_KEY(self->data[1].key); */
+
   self->data[1].value = OBJECT(n2);

   return 0;
...
@@ -233,7 +226,7 @@ BTree_clone(BTree *self)
 err:
   Py_XDECREF(n1);
   Py_XDECREF(n2);
-  free(d);
+  if (d) free(d);
   return -1;
 }
...
@@ -249,7 +242,7 @@ err:
 **            -1 on failure
 */
 static int
-BTree_grow(BTree *self, int index)
+BTree_grow(BTree *self, int index, int noval)
 {
   int i;
   PyObject *v, *e=0;
...
@@ -309,22 +302,35 @@ BTree_grow(BTree *self, int index)
       if (SameType_Check(self, v))
         {
           COPY_KEY(d->key, BTREE(e)->data->key);
+
+          /* We take the unused reference from e, so there's no
+             reason to INCREF!
+          */
+          /* INCREF_KEY(self->data[1].key); */
         }
       else
         {
           COPY_KEY(d->key, BUCKET(e)->keys[0]);
-        }
-      INCREF_KEY(d->key);
+          INCREF_KEY(d->key);
+        }
       d->value = e;
       self->len++;
       if (self->len >= MAX_BTREE_SIZE(self) * 2) return BTree_clone(self);
     }
   }
 else
   {
-    UNLESS (d->value = PyObject_CallObject(OBJECT(&BucketType), NULL))
-      return -1;
+    if (noval)
+      {
+        UNLESS (d->value = PyObject_CallObject(OBJECT(&SetType), NULL))
+          return -1;
+      }
+    else
+      {
+        UNLESS (d->value = PyObject_CallObject(OBJECT(&BucketType), NULL))
+          return -1;
+      }
     self->len = 1;
     Py_INCREF(d->value);
     self->firstbucket = BUCKET(d->value);
...
@@ -376,24 +382,21 @@ BTree_deleteNextBucket(BTree *self)
 }

 /*
-** _BTree_set
-**
-** inserts a key/value pair into the tree
-**
-** Arguments: self   The BTree
-**            key    The key of the item to insert
-**            value  The object to insert
-**            unique We are inserting a unique key
-**
-** Returns:   -1 on failure
-**             0 on successful replacement
-**             1 on successful insert with growth
+  Set (value != 0) or delete (value=0) a tree item.
+
+  If unique is non-zero, then only change if the key is
+  new.
+
+  If noval is non-zero, then don't set a value (the tree
+  is a set).
+
+  Return 1 on successful change, 0 is no change, -1 on error.
 */
 static int
 _BTree_set(BTree *self, PyObject *keyarg, PyObject *value,
            int unique, int noval)
 {
-  int i, min, max, cmp, grew, copied=1;
+  int i, min, max, cmp, grew, copied=1, changed=0, bchanged=0;
   BTreeItem *d;
   KEY_TYPE key;
...
@@ -406,7 +409,7 @@ _BTree_set(BTree *self, PyObject *keyarg, PyObject *value,
   {
     if (value)
       {
-        if (BTree_grow(self, 0) < 0) return -1;
+        if (BTree_grow(self, 0, noval) < 0) return -1;
       }
     else
       {
...
@@ -433,73 +436,113 @@ _BTree_set(BTree *self, PyObject *keyarg, PyObject *value,
   if (SameType_Check(self, d->value))
     grew = _BTree_set( BTREE(d->value), keyarg, value, unique, noval);
   else
-    grew = _bucket_set(BUCKET(d->value), keyarg, value, unique, noval);
+    grew = _bucket_set(BUCKET(d->value), keyarg, value, unique, noval,
+                       &bchanged);
   if (grew < 0) goto err;

   if (grew)
     {
+      bchanged = 1;             /* A bucket changed size */
       if (value)                /* got bigger */
         {
           if (SameType_Check(self, d->value))
             {
-              if (BTREE(d->value)->len > MAX_BTREE_SIZE(d->value)
-                  && BTree_grow(self, min) < 0)
-                goto err;
+              if (BTREE(d->value)->len > MAX_BTREE_SIZE(d->value))
+                {
+                  if (BTree_grow(self, min, noval) < 0) goto err;
+                  changed = 1;
+                }
             }
           else
             {
-              if (BUCKET(d->value)->len > MAX_BUCKET_SIZE(d->value)
-                  && BTree_grow(self, min) < 0)
-                goto err;
+              if (BUCKET(d->value)->len > MAX_BUCKET_SIZE(d->value))
+                {
+                  if (BTree_grow(self, min, noval) < 0) goto err;
+                  changed = 1;
+                }
             }
         }
       else                      /* got smaller */
         {
+          if (min && grew > 1)
+            { /* Somebody below us deleted their first bucket and */
+              /* and an intermediate tree couldn't handle it.     */
+              if (BTree_deleteNextBucket(BTREE(d[-1].value)) < 0) goto err;
+              grew = 1;         /* Reset flag, since we handled it */
+            }
           if (BUCKET(d->value)->len == 0)
-            {
-              if (min)
+            {                   /* Got empty */
+              if (! SameType_Check(self, d->value))
                 {
-                  /*If it's not our first bucket, we can tell the
-                    previous bucket to adjust it's reference to
-                    it. */
-                  if (Bucket_deleteNextBucket(BUCKET(d[-1].value)) < 0)
-                    goto err;
-                }
-              else
-                {
-                  /* If it's the first bucket, we can't adjust the
-                     reference to it ourselves, so we'll just
-                     increment the grew flag to indicate to a
-                     parent node that it's last bucket should
-                     adjust its reference. If there is no parent,
-                     then there's nothing to do. */
-                  grew++;
+                  /* We are about to delete a bucket. */
+                  if (min)
+                    {
+                      /* Not the first subtree, we can delete it because
+                         we have the previous subtree handy.
+                      */
+                      if (SameType_Check(self, d->value))
+                        {
+                          if (0 && BTree_deleteNextBucket(BTREE(d[-1].value)) < 0)
+                            goto err;
+                        }
+                      else
+                        {
+                          if (Bucket_deleteNextBucket(BUCKET(d[-1].value)) < 0)
+                            goto err;
+                        }
+                    }
                 }
               self->len--;
               Py_DECREF(d->value);
-              DECREF_KEY(d->key);
+              if (min) DECREF_KEY(d->key);
               if (min < self->len)
                 memmove(d, d+1, (self->len - min) * sizeof(BTreeItem));
-              if (self->len == 1 && BUCKET(self->data->value)->len == 0)
+
+              if (! min)
                 {
-                  /* Our last subtree is empty, woo hoo, we can delete it! */
-                  Py_DECREF(self->data->value);
-
-                  /* Ah hah! I bet you are wondering why we don't
-                     decref the first key.  We don't decref it because
-                     we don't initialize it in the first place. So
-                     there!
-
-                  DECREF_KEY(self->data->key);
-                  */
-                  self->len = 0;
-                  Py_DECREF(self->firstbucket);
-                  self->firstbucket = NULL;
+                  if (self->len)
+                    { /* We just deleted our first child, so we need to
+                         adjust our first bucket. */
+                      if (SameType_Check(self, self->data->value))
+                        {
+                          UNLESS (PER_USE(BTREE(self->data->value))) goto err;
+                          ASSIGNB(self->firstbucket,
+                                  BTREE(self->data->value)->firstbucket);
+                          Py_XINCREF(self->firstbucket);
+                          PER_ALLOW_DEACTIVATION(BTREE(self->data->value));
+                        }
+                      else
+                        {
+                          ASSIGNB(self->firstbucket, BUCKET(self->data->value));
+                          Py_INCREF(self->firstbucket);
+                        }
+                      /* We can toss our first key now */
+                      DECREF_KEY(self->data->key);
+                    }
+                  else
+                    {
+                      Py_XDECREF(self->firstbucket);
+                      self->firstbucket = 0;
+                    }
+                  changed = 1;
                 }
             }
         }
-      if (PER_CHANGED(self) < 0) goto err;
     }

+#ifdef PERSISTENT
+  if (changed
+      || (bchanged                                     /* The bucket changed */
+          && self->len == 1                            /* We have only one   */
+          && !SameType_Check(self, self->data->value)  /* It's our child     */
+          && BUCKET(self->data->value)->oid == NULL    /* It's in our record */
+          )
+      )
+    if (PER_CHANGED(self) < 0) goto err;
+#endif
+
   PER_ALLOW_DEACTIVATION(self);
   return grew;
...
@@ -540,18 +583,35 @@ BTree_setitem(BTree *self, PyObject *key, PyObject *v)
 static int
 _BTree_clear(BTree *self)
 {
-  int i;
+  int i, l;

-  for (i = self->len; --i >= 0; )
+  /* The order in which we dealocate, from "top to bottom" is critical
+     to prevent memory memory errors when the deallocation stack
+     becomes huge when dealocating use linked lists of buckets.
+  */
+
+  if (self->firstbucket)
+    {
+      ASSERT(self->firstbucket->ob_refcnt > 1,
+             "Invalid firstbucket pointer", -1);
+      Py_DECREF(self->firstbucket);
+      self->firstbucket = NULL;
+    }
+
+  for (l = self->len, i = 0; i < l; i++)
     {
       if (i) DECREF_KEY(self->data[i].key);
       Py_DECREF(self->data[i].value);
     }
-  Py_XDECREF(self->firstbucket);
-  self->firstbucket = NULL;
   self->len = 0;

+  if (self->data)
+    {
+      free(self->data);
+      self->data = 0;
+      self->size = 0;
+    }
+
   return 0;
 }
...
@@ -559,7 +619,7 @@ _BTree_clear(BTree *self)
 static PyObject *
 BTree__p_deactivate(BTree *self, PyObject *args)
 {
-  if (self->state == cPersistent_UPTODATE_STATE)
+  if (self->state == cPersistent_UPTODATE_STATE && self->jar)
     {
       if (_BTree_clear(self) < 0) return NULL;
       self->state = cPersistent_GHOST_STATE;
...
@@ -570,17 +630,6 @@ BTree__p_deactivate(BTree *self, PyObject *args)
 }
 #endif

-/*
-** BTree_clear
-**
-** Wrapper for _BTree_clear
-**
-** Arguments: self the BTree
-**            args (unused)
-**
-** Returns:   None on success
-**            NULL on failure
-*/
 static PyObject *
 BTree_clear(BTree *self, PyObject *args)
 {
...
@@ -602,12 +651,6 @@ err:
   return NULL;
 }

-/*
-** BTree_getstate
-**
-** Get a tuple of all objects in a BTree
-**
-*/
 static PyObject *
 BTree_getstate(BTree *self, PyObject *args)
 {
...
@@ -619,7 +662,22 @@ BTree_getstate(BTree *self, PyObject *args)
   if (self->len)
     {
       UNLESS (r = PyTuple_New(self->len * 2 - 1)) goto err;
-      for (i = self->len, l = 0; --i >= 0; )
+
+      if (self->len == 1 && self->data->value->ob_type != self->ob_type
+#ifdef PERSISTENT
+          && BUCKET(self->data->value)->oid == NULL
+#endif
+          )
+        { /* We have just one bucket. Save it's data directly. */
+          UNLESS (o = bucket_getstate(BUCKET(self->data->value), NULL))
+            goto err;
+          PyTuple_SET_ITEM(r, 0, o);
+          ASSIGN(r, Py_BuildValue("(O)", r));
+        }
+      else
+        {
+          for (i = 0, l = 0; i < self->len; i++)
             {
               if (i)
                 {
...
@@ -634,6 +692,8 @@ BTree_getstate(BTree *self, PyObject *args)
             }
           ASSIGN(r, Py_BuildValue("OO", r, self->firstbucket));
+        }
     }
   else
     {
       r = Py_None;
...
@@ -649,41 +709,27 @@ err:
   return NULL;
 }

-/*
-** BTree_setstate
-**
-** Bulk set all objects in a BTree from a tuple
-*/
-static PyObject *
-BTree_setstate(BTree *self, PyObject *args)
+static int
+_BTree_setstate(BTree *self, PyObject *state, int noval)
 {
-  PyObject *state, *k, *v=0, *items;
+  PyObject *items, *o, *firstbucket=0;
   BTreeItem *d;
-  Bucket *firstbucket;
   int len, l, i, r, copied=1;

-  if (!PyArg_ParseTuple(args, "O", &state)) return NULL;
-  PER_PREVENT_DEACTIVATION(self);
-  if (_BTree_clear(self) < 0) goto err;
+  if (_BTree_clear(self) < 0) return -1;

   if (state != Py_None)
     {
       if (!PyArg_ParseTuple(state, "O|O", &items, &firstbucket))
-        goto err;
+        return -1;

-      if ((len = PyTuple_Size(items)) < 0) goto err;
+      if ((len = PyTuple_Size(items)) < 0) return -1;
       len = (len + 1) / 2;

-      self->firstbucket = firstbucket;
-      Py_INCREF(firstbucket);
-
       if (len > self->size)
         {
-          UNLESS (d = PyRealloc(self->data, sizeof(BTreeItem) * len)) goto err;
+          UNLESS (d = PyRealloc(self->data, sizeof(BTreeItem) * len))
+            return -1;
           self->data = d;
           self->size = len;
         }
...
@@ -692,28 +738,108 @@ BTree_setstate(BTree *self, PyObject *args)
         {
           if (i)
             {
-              COPY_KEY_FROM_ARG(d->key, PyTuple_GET_ITEM(state, l), &copied);
+              COPY_KEY_FROM_ARG(d->key, PyTuple_GET_ITEM(items, l), &copied);
               l++;
-              UNLESS (&copied) return NULL;
+              UNLESS (&copied) return -1;
               INCREF_KEY(d->key);
             }
-          d->value = PyTuple_GET_ITEM(state, l);
+          d->value = PyTuple_GET_ITEM(items, l);
+          l++;
+          if (PyTuple_Check(d->value))
+            {
+              if (noval)
+                {
+                  UNLESS (d->value = PyObject_CallObject(OBJECT(&SetType),
+                                                         NULL))
+                    return -1;
+                  if (_set_setstate(BUCKET(d->value),
+                                    PyTuple_GET_ITEM(items, l)) < 0)
+                    return -1;
+                }
+              else
+                {
+                  UNLESS (d->value = PyObject_CallObject(OBJECT(&BucketType),
+                                                         NULL))
+                    return -1;
+                  if (_bucket_setstate(BUCKET(d->value),
+                                       PyTuple_GET_ITEM(items, l)) < 0)
+                    return -1;
+                }
+            }
+          else
+            {
               Py_INCREF(d->value);
-          l++;
+            }
         }
-      self->len = l;
+
+      if (len)
+        {
+          if (! firstbucket) firstbucket = self->data->value;
+
+          UNLESS (ExtensionClassSubclassInstance_Check(
+                    firstbucket, noval ? &SetType : &BucketType))
+            {
+              PyErr_SetString(PyExc_TypeError,
+                              "No firstbucket in non-empty BTree");
+              return -1;
+            }
+          self->firstbucket = BUCKET(firstbucket);
+          Py_INCREF(firstbucket);
+        }
+
+      self->len = len;
     }

+  return 0;
+}
+
+static PyObject *
+BTree_setstate(BTree *self, PyObject *args)
+{
+  int r;
+
+  if (!PyArg_ParseTuple(args, "O", &args)) return NULL;
+
+  PER_PREVENT_DEACTIVATION(self);
+  r = _BTree_setstate(self, args, 0);
   PER_ALLOW_DEACTIVATION(self);
+
+  if (r < 0) return NULL;
   Py_INCREF(Py_None);
   return Py_None;
-
-err:
-  PER_ALLOW_DEACTIVATION(self);
-  return NULL;
 }

+#ifdef PERSISTENT
+static PyObject *
+BTree__p_resolveConflict(BTree *self, PyObject *args)
+{
+  PyObject *s[3], *r;
+  int i;
+
+  UNLESS(PyArg_ParseTuple(args, "OOO", s, s+1, s+2)) return NULL;
+
+  /* for each state, detuplefy it twice */
+  for (i=0; i < 3; i++)
+    UNLESS (s[i] == Py_None || PyArg_ParseTuple(s[i], "O", s+i)) return NULL;
+  for (i=0; i < 3; i++)
+    UNLESS (s[i] == Py_None || PyArg_ParseTuple(s[i], "O", s+i)) return NULL;
+
+  for (i=0; i < 3; i++) /* Now make sure detupled thing is a tuple */
+    UNLESS (s[i] == Py_None || PyTuple_Check(s[i]))
+      return merge_error(-100, -100, -100, -100);
+
+  if (ExtensionClassSubclassInstance_Check(self, &BTreeType))
+    r = _bucket__p_resolveConflict(OBJECT(&BucketType), s);
+  else
+    r = _bucket__p_resolveConflict(OBJECT(&SetType), s);
+
+  if (r) ASSIGN(r, Py_BuildValue("((O))", r));
+
+  return r;
+}
+#endif
+
 /*
   BTree_findRangeEnd -- Find one end, expressed as a bucket and
...
@@ -910,7 +1036,9 @@ BTree_rangeSearch(BTree *self, PyObject *args, char type)
   else
     {
       highbucket = BTree_lastBucket(self);
+      UNLESS (PER_USE(highbucket)) goto err;
       highoffset = highbucket->len - 1;
+      PER_ALLOW_DEACTIVATION(highbucket);
     }

   PER_ALLOW_DEACTIVATION(self);
...
@@ -1046,7 +1174,8 @@ BTree_has_key(BTree *self, PyObject *args)
 {
   PyObject *key;

   UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;

   return _BTree_get(self, key, 1);
 }

 static PyObject *
...
@@ -1095,7 +1224,13 @@ static struct PyMethodDef BTree_methods[] = {
    "insert(key, value) -- Add an item if the key is not already used.\n\n"
    "Return 1 if the item was added, or 0 otherwise"},
+  {"update", (PyCFunction) Mapping_update, METH_VARARGS,
+   "update(collection) -- Add the items from the given collection"},
+  {"__init__", (PyCFunction) Mapping_update, METH_VARARGS,
+   "__init__(collection) -- Initialize with items from the given collection"},
 #ifdef PERSISTENT
+  {"_p_resolveConflict", (PyCFunction) BTree__p_resolveConflict, METH_VARARGS,
+   "_p_resolveConflict() -- Reinitialize from a newly created copy"},
   {"_p_deactivate", (PyCFunction) BTree__p_deactivate, METH_VARARGS,
    "_p_deactivate() -- Reinitialize from a newly created copy"},
 #endif
...
@@ -1105,16 +1240,7 @@ static struct PyMethodDef BTree_methods[] = {
 static void
 BTree_dealloc(BTree *self)
 {
-  int i;
-
-  for (i = self->len; --i >= 0; )
-    {
-      if (i) DECREF_KEY(self->data[i].key);
-      Py_DECREF(self->data[i].value);
-    }
-  if (self->data) free(self->data);
-  Py_XDECREF(self->firstbucket);
+  _BTree_clear(self);

   PER_DEL(self);
...
trunk/src/BTrees/BucketTemplate.c

...
@@ -111,7 +111,7 @@ _bucket_get(Bucket *self, PyObject *keyarg, int has_key)
       if (cmp < 0) min = i;
       else if (cmp == 0)
         {
-          if (has_key) r = PyInt_FromLong(1);
+          if (has_key) r = PyInt_FromLong(has_key);
           else
             {
               COPY_VALUE_TO_OBJECT(r, self->values[i]);
...
@@ -184,7 +184,8 @@ Bucket_grow(Bucket *self, int noval)
 **             1 on success with a new value (growth)
 */
 static int
-_bucket_set(Bucket *self, PyObject *keyarg, PyObject *v, int unique, int noval)
+_bucket_set(Bucket *self, PyObject *keyarg, PyObject *v,
+            int unique, int noval, int *changed)
 {
   int min, max, i, l, cmp, copied=1;
   KEY_TYPE key;
...
@@ -203,11 +204,21 @@ _bucket_set(Bucket *self, PyObject *keyarg, PyObject *v, int unique, int noval)
     {
       if (!unique && !noval && self->values)
         {
-          DECREF_VALUE(self->values[i]);
-          COPY_VALUE_FROM_ARG(self->values[i], v, &copied);
+          VALUE_TYPE value;
+
+          COPY_VALUE_FROM_ARG(value, v, &copied);
           UNLESS (copied) return -1;
+
+#ifdef VALUE_SAME
+          if (VALUE_SAME(self->values[i], value))
+            { /* short-circuit if no change */
+              PER_ALLOW_DEACTIVATION(self);
+              return 0;
+            }
+#endif
+          if (changed) *changed = 1;
+          DECREF_VALUE(self->values[i]);
+          COPY_VALUE(self->values[i], value);
           INCREF_VALUE(self->values[i]);
           if (PER_CHANGED(self) < 0) goto err;
         }
...
@@ -308,10 +319,67 @@ err:
 static int
 bucket_setitem(Bucket *self, PyObject *key, PyObject *v)
 {
-  if (_bucket_set(self, key, v, 0, 0) < 0) return -1;
+  if (_bucket_set(self, key, v, 0, 0, 0) < 0) return -1;
   return 0;
 }

+static PyObject *
+Mapping_update(PyObject *self, PyObject *args)
+{
+  PyObject *seq=0, *o, *t, *v, *tb, *k;
+  int i, n=0, ind;
+
+  UNLESS(PyArg_ParseTuple(args, "|O:update", &seq)) return NULL;
+
+  if (seq)
+    {
+      if (PySequence_Check(seq))
+        {
+          Py_INCREF(seq);
+        }
+      else
+        {
+          seq = PyObject_GetAttr(seq, items_str);
+          UNLESS(seq) return NULL;
+          ASSIGN(seq, PyObject_CallObject(seq, NULL));
+          UNLESS(seq) return NULL;
+        }
+
+      for (i=0; ; i++)
+        {
+          UNLESS (o = PySequence_GetItem(seq, i))
+            {
+              PyErr_Fetch(&t, &v, &tb);
+              if (t != PyExc_IndexError)
+                {
+                  PyErr_Restore(t, v, tb);
+                  goto err;
+                }
+              Py_XDECREF(t);
+              Py_XDECREF(v);
+              Py_XDECREF(tb);
+              break;
+            }
+          ind = PyArg_ParseTuple(o, "OO;items must be 2-item tuples", &k, &v);
+          if (ind)
+            ind = PyObject_SetItem(self, k, v);
+          else
+            ind = -1;
+          Py_DECREF(o);
+          if (ind < 0) goto err;
+        }
+    }
+
+  Py_INCREF(Py_None);
+  return Py_None;
+
+err:
+  Py_DECREF(seq);
+  return NULL;
+}
+
 /*
 ** bucket_split
 **
...
@@ -329,6 +397,8 @@ bucket_split(Bucket *self, int index, Bucket *next)
 {
   int next_size;

+  ASSERT(self->len > 1, "split of empty bucket", -1);
+
   if (index < 0 || index >= self->len) index = self->len / 2;

   next_size = self->len - index;
...
@@ -726,21 +796,45 @@ bucket_byValue(Bucket *self, PyObject *args)
   return NULL;
 }

-#ifdef PERSISTENT
-static PyObject *
-bucket__p_deactivate(Bucket *self, PyObject *args)
+static int
+_bucket_clear(Bucket *self)
 {
-  if (self->state == cPersistent_UPTODATE_STATE)
-    {
   int i;

-      if (self->next)
-        {
-          Py_DECREF(self->next);
-          self->next = 0;
-        }
-      for (i = self->len; --i >= 0; )
-        {
-          DECREF_KEY(self->keys[i]);
-          if (self->values) DECREF_VALUE(self->values[i]);
-        }
-      self->len = 0;
+  for (i = self->len; --i >= 0; )
+    {
+      DECREF_KEY(self->keys[i]);
+      if (self->values) DECREF_VALUE(self->values[i]);
+    }
+  Py_XDECREF(self->next);
+  self->len = 0;
+  if (self->values)
+    {
+      free(self->values);
+      self->values = 0;
+    }
+  if (self->keys)
+    {
+      free(self->keys);
+      self->keys = 0;
+    }
+  self->size = 0;
+  return 0;
+}
+
+#ifdef PERSISTENT
+static PyObject *
+bucket__p_deactivate(Bucket *self, PyObject *args)
+{
+  if (self->state == cPersistent_UPTODATE_STATE && self->jar)
+    {
+      if (_bucket_clear(self) < 0) return NULL;
       self->state = cPersistent_GHOST_STATE;
     }
...
@@ -756,13 +850,11 @@ bucket_clear(Bucket *self, PyObject *args)
   PER_USE_OR_RETURN(self, NULL);

-  for (i = self->len; --i >= 0; )
+  if (self->len)
     {
-      DECREF_KEY(self->keys[i]);
-      if (self->values) DECREF_VALUE(self->values[i]);
-    }
-  self->len = 0;
-  if (PER_CHANGED(self) < 0) goto err;
+      if (_bucket_clear(self) < 0) return NULL;
+      if (PER_CHANGED(self) < 0) goto err;
+    }
   PER_ALLOW_DEACTIVATION(self);
   Py_INCREF(Py_None);
   return Py_None;
...
@@ -778,10 +870,14 @@ bucket_getstate(Bucket *self, PyObject *args)
   PyObject *o=0, *items=0;
   int i, len, l;

+  if (args && !PyArg_ParseTuple(args, "")) return NULL;
+
   PER_USE_OR_RETURN(self, NULL);

   len = self->len;
+  if (self->values)
+    { /* Bucket */
       UNLESS (items = PyTuple_New(len*2)) goto err;
       for (i=0, l=0; i < len; i++)
         {
...
@@ -795,6 +891,17 @@ bucket_getstate(Bucket *self, PyObject *args)
           PyTuple_SET_ITEM(items, l, o);
           l++;
         }
+    }
+  else
+    { /* Set */
+      UNLESS (items = PyTuple_New(len)) goto err;
+      for (i=0; i < len; i++)
+        {
+          COPY_KEY_TO_OBJECT(o, self->keys[i]);
+          UNLESS (o) goto err;
+          PyTuple_SET_ITEM(items, i, o);
+        }
+    }

   if (self->next)
     ASSIGN(items, Py_BuildValue("OO", items, self->next));
...
@@ -811,19 +918,8 @@ err:
   return NULL;
 }

-/*
-** bucket_setstate
-**
-** bulk set of all items in bucket
-**
-** Arguments: self The Bucket
-**            args The object pointng to the two lists of tuples
-**
-** Returns:   None on success
-**            NULL on error
-*/
-static PyObject *
-bucket_setstate(Bucket *self, PyObject *args)
+static int
+_bucket_setstate(Bucket *self, PyObject *args)
 {
   PyObject *k, *v, *r, *items;
   Bucket *next=0;
...
@@ -831,14 +927,11 @@ bucket_setstate(Bucket *self, PyObject *args)
   KEY_TYPE *keys;
   VALUE_TYPE *values;

-  PER_PREVENT_DEACTIVATION(self);
-  UNLESS (PyArg_ParseTuple(args, "O", &args)) goto err;
-  UNLESS (PyArg_ParseTuple(args, "O|O!", &items, self->ob_type, &next))
-    goto err;
+  UNLESS (PyArg_ParseTuple(args, "O|O", &items, &next)) return -1;

-  if ((len = PyTuple_Size(items)) < 0) goto err;
+  if ((len = PyTuple_Size(items)) < 0) return -1;
+  len /= 2;

   for (i = self->len; --i >= 0; )
     {
...
@@ -855,8 +948,10 @@ bucket_setstate(Bucket *self, PyObject *args)
   if (len > self->size)
     {
-      UNLESS (keys = PyRealloc(self->keys, sizeof(KEY_TYPE)*len)) goto err;
-      UNLESS (values = PyRealloc(self->values, sizeof(KEY_TYPE)*len)) goto err;
+      UNLESS (keys = PyRealloc(self->keys, sizeof(KEY_TYPE)*len))
+        return -1;
+      UNLESS (values = PyRealloc(self->values, sizeof(KEY_TYPE)*len))
+        return -1;
       self->keys = keys;
       self->values = values;
       self->size = len;
...
@@ -870,24 +965,40 @@ bucket_setstate(Bucket *self, PyObject *args)
       l++;
       COPY_KEY_FROM_ARG(self->keys[i], k, &copied);
-      UNLESS (copied) return NULL;
+      UNLESS (copied) return -1;
       COPY_VALUE_FROM_ARG(self->values[i], v, &copied);
-      UNLESS (copied) return NULL;
-      INCREF_KEY(k);
-      INCREF_VALUE(v);
+      UNLESS (copied) return -1;
+      INCREF_KEY(self->keys[i]);
+      INCREF_VALUE(self->values[i]);
     }
-  self->len = l;

-  PER_ALLOW_DEACTIVATION(self);
+  self->len = len;
+
+  if (next)
+    {
+      self->next = next;
+      Py_INCREF(next);
+    }
+
+  return 0;
+}
+
+static PyObject *
+bucket_setstate(Bucket *self, PyObject *args)
+{
+  int r;
+
+  UNLESS (PyArg_ParseTuple(args, "O", &args)) return NULL;
+
+  PER_PREVENT_DEACTIVATION(self);
+  r = _bucket_setstate(self, args);
+  PER_ALLOW_DEACTIVATION(self);
+
+  if (r < 0) return NULL;
   Py_INCREF(Py_None);
   return Py_None;
-
-perr:
-  self->len = i;
-err:
-  PER_ALLOW_DEACTIVATION(self);
-  return NULL;
 }
...
@@ -919,6 +1030,73 @@ bucket_getm(Bucket *self, PyObject *args)
   return d;
 }

+#ifdef PERSISTENT
+static PyObject *merge_error(int p1, int p2, int p3, int reason);
+static PyObject *bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3);
+
+static PyObject *
+_bucket__p_resolveConflict(PyObject *ob_type, PyObject *s[3])
+{
+  PyObject *r=0, *a;
+  Bucket *b[3];
+  int i;
+
+  for (i=0; i < 3; i++)
+    {
+      if (b[i] = (Bucket*)PyObject_CallObject(OBJECT(ob_type), NULL))
+        {
+          if (s[i] == Py_None) /* None is equivalent to empty, for BTrees */
+            continue;
+          ASSIGN(r, PyObject_GetAttr(OBJECT(b[i]), __setstate___str));
+          if (a = PyTuple_New(1))
+            {
+              if (r)
+                {
+                  PyTuple_SET_ITEM(a, 0, s[i]);
+                  Py_INCREF(s[i]);
+                  ASSIGN(r, PyObject_CallObject(r, a));
+                }
+              Py_DECREF(a);
+              if (r) continue;
+            }
+        }
+      Py_XDECREF(r);
+      while (--i >= 0)
+        {
+          Py_DECREF(b[i]);
+        }
+      return NULL;
+    }
+  Py_DECREF(r);
+  r = NULL;
+
+  if (b[0]->next != b[1]->next || b[0]->next != b[2]->next)
+    {
+      merge_error(-1, -1, -1, -1);
+      goto err;
+    }
+
+  r = bucket_merge(b[0], b[1], b[2]);
+
+err:
+  Py_DECREF(b[0]);
+  Py_DECREF(b[1]);
+  Py_DECREF(b[2]);
+
+  return r;
+}
+
+static PyObject *
+bucket__p_resolveConflict(Bucket *self, PyObject *args)
+{
+  PyObject *s[3];
+
+  UNLESS(PyArg_ParseTuple(args, "OOO", &s[0], &s[1], &s[2])) return NULL;
+
+  return _bucket__p_resolveConflict(OBJECT(self->ob_type), s);
+}
+#endif
+
 static struct PyMethodDef Bucket_methods[] = {
   {"__getstate__", (PyCFunction) bucket_getstate, METH_VARARGS,
    "__getstate__() -- Return the picklable state of the object"},
...
@@ -930,6 +1108,10 @@ static struct PyMethodDef Bucket_methods[] = {
    "has_key(key) -- Test whether the bucket contains the given key"},
   {"clear", (PyCFunction) bucket_clear, METH_VARARGS,
    "clear() -- Remove all of the items from the bucket"},
+  {"update", (PyCFunction) Mapping_update, METH_VARARGS,
+   "update(collection) -- Add the items from the given collection"},
+  {"__init__", (PyCFunction) Mapping_update, METH_VARARGS,
+   "__init__(collection) -- Initialize with items from the given collection"},
   {"maxKey", (PyCFunction) Bucket_maxKey, METH_VARARGS,
    "maxKey([key]) -- Fine the maximum key\n\n"
    "If an argument is given, find the maximum <= the argument"},
...
@@ -949,6 +1131,8 @@ static struct PyMethodDef Bucket_methods[] = {
    "Return the default (or None) if the key is not found."},
 #ifdef PERSISTENT
+  {"_p_resolveConflict", (PyCFunction) bucket__p_resolveConflict, METH_VARARGS,
+   "_p_resolveConflict() -- Reinitialize from a newly created copy"},
   {"_p_deactivate", (PyCFunction) bucket__p_deactivate, METH_VARARGS,
    "_p_deactivate() -- Reinitialize from a newly created copy"},
 #endif
...
@@ -960,13 +1144,8 @@ Bucket_dealloc(Bucket *self)
 {
   int i;

-  for (i = self->len; --i >= 0; )
-    {
-      DECREF_KEY(self->keys[i]);
-      if (self->values) DECREF_VALUE(self->values[i]);
-    }
-  free(self->keys);
-  free(self->values);
+  _bucket_clear(self);

   PER_DEL(self);

   Py_DECREF(self->ob_type);
...
trunk/src/BTrees/IOBTree.c

...
@@ -2,7 +2,7 @@
 #define PERSISTENT

 #define PREFIX "IO"
-#define DEFAULT_MAX_BUCKET_SIZE 30
+#define DEFAULT_MAX_BUCKET_SIZE 60
 #define DEFAULT_MAX_BTREE_SIZE 500
 #define INITMODULE initIOBTree
...
trunk/src/BTrees/Interfaces.py
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# -------------------------------------
#
# Copyright (c) Digital Creations. All rights reserved.
#
# This license has been certified as Open Source(tm).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions in source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Digital Creations requests that attribution be given to Zope
# in any manner possible. Zope includes a "Powered by Zope"
# button that is installed by default. While it is not a license
# violation to remove this button, it is requested that the
# attribution remain. A significant investment has been put
# into Zope, and this effort will continue if the Zope community
# continues to grow. This is one way to assure that growth.
#
# 4. All advertising materials and documentation mentioning
# features derived from or use of this software must display
# the following acknowledgement:
#
# "This product includes software developed by Digital Creations
# for use in the Z Object Publishing Environment
# (http://www.zope.org/)."
#
# In the event that the product being advertised includes an
# intact Zope distribution (with copyright and license included)
# then this clause is waived.
#
# 5. Names associated with Zope or Digital Creations must not be used to
# endorse or promote products derived from this software without
# prior written permission from Digital Creations.
#
# 6. Modified redistributions of any form whatsoever must retain
# the following acknowledgment:
#
# "This product includes software developed by Digital Creations
# for use in the Z Object Publishing Environment
# (http://www.zope.org/)."
#
# Intact (re-)distributions of any official Zope release do not
# require an external acknowledgement.
#
# 7. Modifications are encouraged but must be packaged separately as
# patches to official Zope releases. Distributions that do not
# clearly separate the patches from the original work must be clearly
# labeled as unofficial distributions. Modifications which do not
# carry the name Zope may be packaged in any form, as long as they
# conform to all of the clauses above.
#
#
# Disclaimer
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL CREATIONS OR ITS
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#
# This software consists of contributions made by Digital Creations and
# many individuals on behalf of Digital Creations. Specific
# attributions are listed in the accompanying credits file.
#
##############################################################################
 import OOBTree, Interface
...
@@ -75,6 +159,9 @@ class ISetMutable(IKeyed):
     def remove(key):
         """Remove the key from the set."""

+    def update(seq):
+        """Add the items from the given sequence to the set"""
+
 class IKeySequence(IKeyed, ISized):

     def __getitem__(index):
...
@@ -90,6 +177,7 @@ class ISet(IKeySequence, ISetMutable):

 class ITreeSet(IKeyed, ISetMutable):
     pass
+
 class IDictionaryIsh(IKeyed, ISized):

     def __getitem__(key):
...
@@ -116,6 +204,14 @@ class IDictionaryIsh(IKeyed, ISized):
         Raise a key error if the key if not in the collection."""

+    def update(collection):
+        """Add the items from the given collection object to the collection
+
+        The input collection must be a sequence of key-value tuples,
+        or an object with an 'items' method that returns a sequence of
+        key-value tuples.
+        """
+
     def values(min=None, max=None):
         """Return a IReadSequence containing the values in the collection
...
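The update() method added to the mapping and set interfaces above accepts either a sequence of key-value tuples or an object with an items() method. A minimal usage sketch under those assumptions (the keys and values shown are hypothetical, not taken from the commit):

    import OOBTree

    t = OOBTree.OOBTree()
    t.update([('a', 1), ('b', 2)])   # a sequence of 2-item tuples
    t.update({'c': 3})               # or any object with an items() method
    print t['a'], t['c']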
trunk/src/BTrees/Length.py  (new file, 0 → 100644)
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# -------------------------------------
#
# Copyright (c) Digital Creations. All rights reserved.
#
# This license has been certified as Open Source(tm).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions in source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Digital Creations requests that attribution be given to Zope
# in any manner possible. Zope includes a "Powered by Zope"
# button that is installed by default. While it is not a license
# violation to remove this button, it is requested that the
# attribution remain. A significant investment has been put
# into Zope, and this effort will continue if the Zope community
# continues to grow. This is one way to assure that growth.
#
# 4. All advertising materials and documentation mentioning
# features derived from or use of this software must display
# the following acknowledgement:
#
# "This product includes software developed by Digital Creations
# for use in the Z Object Publishing Environment
# (http://www.zope.org/)."
#
# In the event that the product being advertised includes an
# intact Zope distribution (with copyright and license included)
# then this clause is waived.
#
# 5. Names associated with Zope or Digital Creations must not be used to
# endorse or promote products derived from this software without
# prior written permission from Digital Creations.
#
# 6. Modified redistributions of any form whatsoever must retain
# the following acknowledgment:
#
# "This product includes software developed by Digital Creations
# for use in the Z Object Publishing Environment
# (http://www.zope.org/)."
#
# Intact (re-)distributions of any official Zope release do not
# require an external acknowledgement.
#
# 7. Modifications are encouraged but must be packaged separately as
# patches to official Zope releases. Distributions that do not
# clearly separate the patches from the original work must be clearly
# labeled as unofficial distributions. Modifications which do not
# carry the name Zope may be packaged in any form, as long as they
# conform to all of the clauses above.
#
#
# Disclaimer
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL CREATIONS OR ITS
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#
# This software consists of contributions made by Digital Creations and
# many individuals on behalf of Digital Creations. Specific
# attributions are listed in the accompanying credits file.
#
##############################################################################
import Persistence

class Length(Persistence.Persistent):
    """BTree lengths are too expensive to compute

    Objects that use BTrees need to keep track of lengths themselves.
    This class provides an object for doing this.

    As a bonus, the object supports application-level conflict
    resolution.
    """

    def __init__(self, v=0): self.value=v

    def __getstate__(self): return self.value

    def __setstate__(self, v): self.value=v

    def set(self, v): self.value=v

    def _p_resolveConflict(self, old, s1, s2): return s1 + s2 - old

    def _p_independent(self):
        # My state doesn't depend on or materially affect the state of
        # other objects.
        return 1

    def change(self, delta):
        self.value = self.value + delta

    def __call__(self, *args): return self.value
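A brief usage sketch (not part of the commit): it assumes an open ZODB root mapping and the get_transaction() API used elsewhere in this changeset.

from BTrees.Length import Length
from BTrees.OOBTree import OOBTree

root['data'] = OOBTree()
root['length'] = Length()            # counter starts at 0

def add(key, value):
    # len() over a whole BTree is expensive, so track the size separately.
    if not root['data'].has_key(key):
        root['length'].change(1)
    root['data'][key] = value

add('a', 1); add('b', 2)
print root['length']()               # -> 2
get_transaction().commit()
# If two connections call change() concurrently, _p_resolveConflict()
# merges the counters as s1 + s2 - old instead of raising a conflict.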
trunk/src/BTrees/MergeTemplate.c
0 → 100644
View file @
2a1ae51f
/*****************************************************************************
  Zope Public License (ZPL) Version 1.0
  (Full ZPL 1.0 license text, identical to the header reproduced above.)
 ****************************************************************************/
/****************************************************************************
Set operations
****************************************************************************/
static int
merge_output(Bucket *r, SetIteration *i, int mapping)
{
  if (r->len >= r->size && Bucket_grow(r, !mapping) < 0) return -1;
  COPY_KEY(r->keys[r->len], i->key);
  INCREF_KEY(r->keys[r->len]);
  if (mapping)
    {
      COPY_VALUE(r->values[r->len], i->value);
      INCREF_VALUE(r->values[r->len]);
    }
  r->len++;
  return 0;
}

static PyObject *
merge_error(int p1, int p2, int p3, int reason)
{
  PyObject *r;

  UNLESS (r=Py_BuildValue("iiii", p1, p2, p3, reason)) r=Py_None;
  PyErr_SetObject(PyExc_ValueError, r);
  if (r != Py_None) Py_DECREF(r);

  return NULL;
}

static PyObject *
bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3)
{
  Bucket *r=0;
  PyObject *s;
  SetIteration i1 = {0,0,0}, i2 = {0,0,0}, i3 = {0,0,0}, it;
  int cmp12, cmp13, cmp23, mapping=0, set;

  if (initSetIteration(&i1, OBJECT(s1), 0, &mapping) < 0) return NULL;
  if (initSetIteration(&i2, OBJECT(s2), 0, &mapping) < 0) return NULL;
  if (initSetIteration(&i3, OBJECT(s3), 0, &mapping) < 0) return NULL;

  set = !mapping;

  if (mapping)
    {
      UNLESS (r=BUCKET(PyObject_CallObject(OBJECT(&BucketType), NULL)))
        goto err;
    }
  else
    {
      UNLESS (r=BUCKET(PyObject_CallObject(OBJECT(&SetType), NULL)))
        goto err;
    }

  if (i1.next(&i1) < 0) return NULL;
  if (i2.next(&i2) < 0) return NULL;
  if (i3.next(&i3) < 0) return NULL;

  while (i1.position >= 0 && i2.position >= 0 && i3.position >= 0)
    {
      cmp12=TEST_KEY(i1.key, i2.key);
      cmp13=TEST_KEY(i1.key, i3.key);
      if (cmp12==0)
        {
          if (cmp13==0)
            {
              if (set || (TEST_VALUE(i1.value, i2.value) == 0))
                { /* change in i3 or all same */
                  if (merge_output(r, &i3, mapping) < 0) goto err;
                }
              else if (set || (TEST_VALUE(i1.value, i3.value) == 0))
                { /* change in i2 */
                  if (merge_output(r, &i2, mapping) < 0) goto err;
                }
              else
                { /* conflicting changes in i2 and i3 */
                  merge_error(i1.position, i2.position, i3.position, 1);
                  goto err;
                }
              if (i1.next(&i1) < 0) goto err;
              if (i2.next(&i2) < 0) goto err;
              if (i3.next(&i3) < 0) goto err;
            }
          else if (cmp13 > 0)
            { /* insert i3 */
              if (merge_output(r, &i3, mapping) < 0) goto err;
              if (i3.next(&i3) < 0) goto err;
            }
          else if (set || (TEST_VALUE(i1.value, i2.value) == 0))
            { /* delete i3 */
              if (i1.next(&i1) < 0) goto err;
              if (i2.next(&i2) < 0) goto err;
            }
          else
            { /* conflicting del in i3 and change in i2 */
              merge_error(i1.position, i2.position, i3.position, 2);
              goto err;
            }
        }
      else if (cmp13 == 0)
        {
          if (cmp12 > 0)
            { /* insert i2 */
              if (merge_output(r, &i2, mapping) < 0) goto err;
              if (i2.next(&i2) < 0) goto err;
            }
          else if (set || (TEST_VALUE(i1.value, i3.value) == 0))
            { /* delete i2 */
              if (i1.next(&i1) < 0) goto err;
              if (i3.next(&i3) < 0) goto err;
            }
          else
            { /* conflicting del in i2 and change in i3 */
              merge_error(i1.position, i2.position, i3.position, 3);
              goto err;
            }
        }
      else
        { /* Both keys changed */
          cmp23=TEST_KEY(i2.key, i3.key);
          if (cmp23==0)
            { /* dueling inserts or deletes */
              merge_error(i1.position, i2.position, i3.position, 4);
              goto err;
            }
          if (cmp12 > 0)
            { /* insert i2 */
              if (cmp23 > 0)
                { /* insert i3 first */
                  if (merge_output(r, &i3, mapping) < 0) goto err;
                  if (i3.next(&i3) < 0) goto err;
                }
              else
                { /* insert i2 first */
                  if (merge_output(r, &i2, mapping) < 0) goto err;
                  if (i2.next(&i2) < 0) goto err;
                }
            }
          else if (cmp13 > 0)
            { /* Insert i3 */
              if (merge_output(r, &i3, mapping) < 0) goto err;
              if (i3.next(&i3) < 0) goto err;
            }
          else
            { /* Dueling deletes */
              merge_error(i1.position, i2.position, i3.position, 5);
              goto err;
            }
        }
    }

  while (i2.position >= 0 && i3.position >= 0)
    { /* New inserts */
      cmp23=TEST_KEY(i2.key, i3.key);
      if (cmp23==0)
        { /* dueling inserts */
          merge_error(i1.position, i2.position, i3.position, 6);
          goto err;
        }
      if (cmp23 > 0)
        { /* insert i3 */
          if (merge_output(r, &i3, mapping) < 0) goto err;
          if (i3.next(&i3) < 0) goto err;
        }
      else
        { /* insert i2 */
          if (merge_output(r, &i2, mapping) < 0) goto err;
          if (i2.next(&i2) < 0) goto err;
        }
    }

  while (i1.position >= 0 && i2.position >= 0)
    { /* deleting i3 */
      cmp12=TEST_KEY(i1.key, i2.key);
      if (cmp12 > 0)
        { /* insert i2 */
          if (merge_output(r, &i2, mapping) < 0) goto err;
          if (i2.next(&i2) < 0) goto err;
        }
      else if (cmp12==0 && (set || (TEST_VALUE(i1.value, i2.value) == 0)))
        { /* delete i3 */
          if (i1.next(&i1) < 0) goto err;
          if (i2.next(&i2) < 0) goto err;
        }
      else
        { /* Dueling deletes or delete and change */
          merge_error(i1.position, i2.position, i3.position, 7);
          goto err;
        }
    }

  while (i1.position >= 0 && i3.position >= 0)
    { /* deleting i2 */
      cmp13=TEST_KEY(i1.key, i3.key);
      if (cmp13 > 0)
        { /* insert i3 */
          if (merge_output(r, &i3, mapping) < 0) goto err;
          if (i3.next(&i3) < 0) goto err;
        }
      else if (cmp13==0 && (set || (TEST_VALUE(i1.value, i3.value) == 0)))
        { /* delete i2 */
          if (i1.next(&i1) < 0) goto err;
          if (i3.next(&i3) < 0) goto err;
        }
      else
        { /* Dueling deletes or delete and change */
          merge_error(i1.position, i2.position, i3.position, 8);
          goto err;
        }
    }

  if (i1.position >= 0)
    { /* Dueling deletes */
      merge_error(i1.position, i2.position, i3.position, 9);
      goto err;
    }

  while (i2.position >= 0)
    { /* Inserting i2 at end */
      if (merge_output(r, &i2, mapping) < 0) goto err;
      if (i2.next(&i2) < 0) goto err;
    }

  while (i3.position >= 0)
    { /* Inserting i3 at end */
      if (merge_output(r, &i3, mapping) < 0) goto err;
      if (i3.next(&i3) < 0) goto err;
    }

  Py_DECREF(i1.set);
  Py_DECREF(i2.set);
  Py_DECREF(i3.set);

  if (s1->next)
    {
      Py_INCREF(s1->next);
      r->next = s1->next;
    }
  s=bucket_getstate(r, NULL);
  Py_DECREF(r);

  return s;

invalid_set_operation:
  PyErr_SetString(PyExc_TypeError, "invalid set operation");

err:
  Py_XDECREF(i1.set);
  Py_XDECREF(i2.set);
  Py_XDECREF(i3.set);
  Py_XDECREF(r);
  return NULL;
}
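bucket_merge() walks the original state (i1), the committed state (i2) and the newly written state (i3) in key order: changes that touch different keys are combined, and anything touched on both sides is reported through merge_error(). A rough Python sketch of the same rule for plain dicts (illustrative only, not part of this commit; the helper name is made up):

def three_way_merge(old, committed, new):
    """Merge two divergent dict states against a common ancestor ``old``.

    Disjoint changes are combined; anything changed on both sides raises
    ValueError, mirroring merge_error() above.
    """
    merged = {}
    for key in sorted(set(old) | set(committed) | set(new)):
        in_c, in_n = key in committed, key in new
        if key not in old:
            # an insert; inserts of the same key on both sides conflict
            if in_c and in_n:
                raise ValueError("conflicting inserts", key)
            merged[key] = committed[key] if in_c else new[key]
        elif in_c and in_n:
            # present everywhere: at most one side may have changed it
            if committed[key] == old[key]:
                merged[key] = new[key]
            elif new[key] == old[key]:
                merged[key] = committed[key]
            else:
                raise ValueError("conflicting changes", key)
        elif in_c or in_n:
            # deleted on exactly one side; the survivor must be unchanged
            survivor = committed[key] if in_c else new[key]
            if survivor != old[key]:
                raise ValueError("delete vs. change", key)
            # unchanged on the surviving side: the delete wins, key omitted
        else:
            # deleted on both sides -- bucket_merge treats this as a conflict
            raise ValueError("dueling deletes", key)
    return merged

print(three_way_merge({1: 'a'}, {1: 'a', 2: 'b'}, {1: 'A'}))
# -> {1: 'A', 2: 'b'}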
trunk/src/BTrees/SetOpTemplate.c
View file @
2a1ae51f
...
@@ -110,6 +110,23 @@ nextIntSet(SetIteration *i)
...
 }
 #endif

+#ifdef KEY_CHECK
+static int
+nextKeyAsSet(SetIteration *i)
+{
+  if (i->position >= 0)
+    {
+      if (i->position < 1)
+        {
+          i->position ++;
+        }
+      else
+        i->position = -1;
+    }
+  return 0;
+}
+#endif

 static int
 initSetIteration(SetIteration *i, PyObject *s, int w, int *merge)
 {
...
@@ -169,6 +186,19 @@ initSetIteration(SetIteration *i, PyObject *s, int w, int *merge)
...
       i->next=nextIntSet;
       i->hasValue=0;
     }
 #endif
+#ifdef KEY_CHECK
+  else if (KEY_CHECK(s))
+    {
+      int copied=1;
+
+      i->set=s;
+      Py_INCREF(s);
+
+      i->next=nextKeyAsSet;
+      i->hasValue=0;
+      COPY_KEY_FROM_ARG(i->key, s, &copied);
+      UNLESS (copied) return -1;
+    }
+#endif
   else
     {
...
@@ -300,7 +330,7 @@ set_operation(PyObject *s1, PyObject *s2,
...
     {
       if (c2)
         {
-          if (r->len >= r->size && Bucket_grow(r, 1) < 0) goto err;
+          if (r->len >= r->size && Bucket_grow(r, !merge) < 0) goto err;
           COPY_KEY(r->keys[r->len], i2.key);
           INCREF_KEY(r->keys[r->len]);
           if (merge)
...
...
trunk/src/BTrees/SetTemplate.c
View file @
2a1ae51f
...
@@ -90,58 +90,59 @@ Set_insert(Bucket *self, PyObject *args)
   int i;

   UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;
-  if ( (i=_bucket_set(self, key, Py_None, 1, 1)) < 0) return NULL;
+  if ( (i=_bucket_set(self, key, Py_None, 1, 1, 0)) < 0) return NULL;
   return PyInt_FromLong(i);
 }

 static PyObject *
-Set_remove(Bucket *self, PyObject *args)
+Set_update(Bucket *self, PyObject *args)
 {
-  PyObject *key;
+  PyObject *seq=0, *o, *t, *v, *tb;
+  int i, n=0, ind;

-  UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;
-  if (_bucket_set(self, key, NULL, 0, 1) < 0) return NULL;
+  UNLESS (PyArg_ParseTuple(args, "|O:update", &seq)) return NULL;

-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static PyObject *
-set_getstate(Bucket *self, PyObject *args)
-{
-  PyObject *r=0, *o=0, *items=0;
-  int i, l;
-
-  PER_USE_OR_RETURN(self, NULL);
-
-  l=self->len;
-
-  UNLESS (items=PyTuple_New(self->len)) goto err;
-  for (i=0; i<l; i++)
+  if (seq)
     {
-      COPY_KEY_TO_OBJECT(o, self->keys[i]);
-      UNLESS (o) goto err;
-      PyTuple_SET_ITEM(items, i, o);
+      for (i=0; ; i++)
+        {
+          UNLESS (o=PySequence_GetItem(seq, i))
+            {
+              PyErr_Fetch(&t, &v, &tb);
+              if (t != PyExc_IndexError)
+                {
+                  PyErr_Restore(t, v, tb);
+                  return NULL;
+                }
+              Py_XDECREF(t); Py_XDECREF(v); Py_XDECREF(tb);
+              break;
+            }
+          ind=_bucket_set(self, o, Py_None, 1, 1, 0);
+          Py_DECREF(o);
+          if (ind < 0) return NULL;
+          n += ind;
+        }
     }

-  if (self->next)
-    r=Py_BuildValue("OO", items, self->next);
-  else
-    r=Py_BuildValue("(O)", items);
-
-  PER_ALLOW_DEACTIVATION(self);
-
-  return r;
-
-err:
-  PER_ALLOW_DEACTIVATION(self);
-  Py_XDECREF(items);
-  Py_XDECREF(r);
-  return NULL;
-}
+  return PyInt_FromLong(n);
+}

+static PyObject *
+Set_remove(Bucket *self, PyObject *args)
+{
+  PyObject *key;
+
+  UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;
+  if (_bucket_set(self, key, NULL, 0, 1, 0) < 0) return NULL;
+
+  Py_INCREF(Py_None);
+  return Py_None;
+}

-static PyObject *
-set_setstate(Bucket *self, PyObject *args)
+static int
+_set_setstate(Bucket *self, PyObject *args)
 {
   PyObject *k, *items;
   Bucket *next=0;
@@ -149,14 +150,10 @@ set_setstate(Bucket *self, PyObject *args)
   KEY_TYPE *keys;
   VALUE_TYPE *values;

-  PER_PREVENT_DEACTIVATION(self);
-
-  UNLESS (PyArg_ParseTuple(args, "O", &args)) goto err;
-  UNLESS (PyArg_ParseTuple(args, "O|O!", &items, self->ob_type, &next)) goto err;
+  UNLESS (PyArg_ParseTuple(args, "O|O", &items, &next)) return -1;

-  if ((l=PyTuple_Size(items)) < 0) goto err;
+  if ((l=PyTuple_Size(items)) < 0) return -1;

   for (i=self->len; --i >= 0; )
     {
@@ -172,7 +169,7 @@ set_setstate(Bucket *self, PyObject *args)
   if (l > self->size)
     {
-      UNLESS (keys=PyRealloc(self->keys, sizeof(KEY_TYPE)*l)) goto err;
+      UNLESS (keys=PyRealloc(self->keys, sizeof(KEY_TYPE)*l)) return -1;
       self->keys=keys;
       self->size=l;
     }
@@ -181,25 +178,39 @@ set_setstate(Bucket *self, PyObject *args)
     {
       k=PyTuple_GET_ITEM(items, i);
       COPY_KEY_FROM_ARG(self->keys[i], k, &copied);
-      UNLESS (copied) return NULL;
-      INCREF_KEY(k);
+      UNLESS (copied) return -1;
+      INCREF_KEY(self->keys[i]);
     }

   self->len=l;

+  if (next)
+    {
+      self->next=next;
+      Py_INCREF(next);
+    }
+
+  return 0;
+}
+
+static PyObject *
+set_setstate(Bucket *self, PyObject *args)
+{
+  int r;
+
+  UNLESS (PyArg_ParseTuple(args, "O", &args)) return NULL;
+
+  PER_PREVENT_DEACTIVATION(self);
+  r=_set_setstate(self, args);
   PER_ALLOW_DEACTIVATION(self);

+  if (r < 0) return NULL;
   Py_INCREF(Py_None);
   return Py_None;
-
-perr:
-  self->len=i;
-err:
-  PER_ALLOW_DEACTIVATION(self);
-  return NULL;
 }

 static struct PyMethodDef Set_methods[] = {
-  {"__getstate__", (PyCFunction) set_getstate, METH_VARARGS,
+  {"__getstate__", (PyCFunction) bucket_getstate, METH_VARARGS,
   "__getstate__() -- Return the picklable state of the object"},
  {"__setstate__", (PyCFunction) set_setstate, METH_VARARGS,
   "__setstate__() -- Set the state of the object"},
@@ -216,11 +227,17 @@ static struct PyMethodDef Set_methods[] = {
   "minKey([key]) -- Find the minimum key\n\n"
   "If an argument is given, find the minimum >= the argument"},
 #ifdef PERSISTENT
+  {"_p_resolveConflict", (PyCFunction) bucket__p_resolveConflict, METH_VARARGS,
+   "_p_resolveConflict() -- Reinitialize from a newly created copy"},
  {"_p_deactivate", (PyCFunction) bucket__p_deactivate, METH_VARARGS,
   "_p_deactivate() -- Reinitialize from a newly created copy"},
 #endif
  {"insert", (PyCFunction)Set_insert, METH_VARARGS,
   "insert(id,[ignored]) -- Add a key to the set"},
+  {"update", (PyCFunction)Set_update, METH_VARARGS,
+   "update(seq) -- Add the items from the given sequence to the set"},
+  {"__init__", (PyCFunction)Set_update, METH_VARARGS,
+   "__init__(seq) -- Initialize with the items from the given sequence"},
  {"remove", (PyCFunction)Set_remove, METH_VARARGS,
   "remove(id) -- Remove an id from the set"},
...
...
trunk/src/BTrees/TreeSetTemplate.c
View file @
2a1ae51f
...
@@ -94,6 +94,42 @@ TreeSet_insert(BTree *self, PyObject *args)
...
@@ -94,6 +94,42 @@ TreeSet_insert(BTree *self, PyObject *args)
return
PyInt_FromLong
(
i
);
return
PyInt_FromLong
(
i
);
}
}
static
PyObject
*
TreeSet_update
(
BTree
*
self
,
PyObject
*
args
)
{
PyObject
*
seq
=
0
,
*
o
,
*
t
,
*
v
,
*
tb
;
int
i
,
n
=
0
,
ind
;
UNLESS
(
PyArg_ParseTuple
(
args
,
"|O:update"
,
&
seq
))
return
NULL
;
if
(
seq
)
{
for
(
i
=
0
;
;
i
++
)
{
UNLESS
(
o
=
PySequence_GetItem
(
seq
,
i
))
{
PyErr_Fetch
(
&
t
,
&
v
,
&
tb
);
if
(
t
!=
PyExc_IndexError
)
{
PyErr_Restore
(
t
,
v
,
tb
);
return
NULL
;
}
Py_XDECREF
(
t
);
Py_XDECREF
(
v
);
Py_XDECREF
(
tb
);
break
;
}
ind
=
_BTree_set
(
self
,
o
,
Py_None
,
1
,
1
);
Py_DECREF
(
o
);
if
(
ind
<
0
)
return
NULL
;
n
+=
ind
;
}
}
return
PyInt_FromLong
(
n
);
}
static
PyObject
*
static
PyObject
*
TreeSet_remove
(
BTree
*
self
,
PyObject
*
args
)
TreeSet_remove
(
BTree
*
self
,
PyObject
*
args
)
{
{
...
@@ -105,10 +141,26 @@ TreeSet_remove(BTree *self, PyObject *args)
...
@@ -105,10 +141,26 @@ TreeSet_remove(BTree *self, PyObject *args)
return
Py_None
;
return
Py_None
;
}
}
static
PyObject
*
TreeSet_setstate
(
BTree
*
self
,
PyObject
*
args
)
{
int
r
;
if
(
!
PyArg_ParseTuple
(
args
,
"O"
,
&
args
))
return
NULL
;
PER_PREVENT_DEACTIVATION
(
self
);
r
=
_BTree_setstate
(
self
,
args
,
1
);
PER_ALLOW_DEACTIVATION
(
self
);
if
(
r
<
0
)
return
NULL
;
Py_INCREF
(
Py_None
);
return
Py_None
;
}
static
struct
PyMethodDef
TreeSet_methods
[]
=
{
static
struct
PyMethodDef
TreeSet_methods
[]
=
{
{
"__getstate__"
,
(
PyCFunction
)
BTree_getstate
,
METH_VARARGS
,
{
"__getstate__"
,
(
PyCFunction
)
BTree_getstate
,
METH_VARARGS
,
"__getstate__() -- Return the picklable state of the object"
},
"__getstate__() -- Return the picklable state of the object"
},
{
"__setstate__"
,
(
PyCFunction
)
BTree
_setstate
,
METH_VARARGS
,
{
"__setstate__"
,
(
PyCFunction
)
TreeSet
_setstate
,
METH_VARARGS
,
"__setstate__() -- Set the state of the object"
},
"__setstate__() -- Set the state of the object"
},
{
"has_key"
,
(
PyCFunction
)
BTree_has_key
,
METH_VARARGS
,
{
"has_key"
,
(
PyCFunction
)
BTree_has_key
,
METH_VARARGS
,
"has_key(key) -- Test whether the bucket contains the given key"
},
"has_key(key) -- Test whether the bucket contains the given key"
},
...
@@ -124,9 +176,15 @@ static struct PyMethodDef TreeSet_methods[] = {
...
@@ -124,9 +176,15 @@ static struct PyMethodDef TreeSet_methods[] = {
"clear() -- Remove all of the items from the BTree"
},
"clear() -- Remove all of the items from the BTree"
},
{
"insert"
,
(
PyCFunction
)
TreeSet_insert
,
METH_VARARGS
,
{
"insert"
,
(
PyCFunction
)
TreeSet_insert
,
METH_VARARGS
,
"insert(id,[ignored]) -- Add an id to the set"
},
"insert(id,[ignored]) -- Add an id to the set"
},
{
"update"
,
(
PyCFunction
)
TreeSet_update
,
METH_VARARGS
,
"update(seq) -- Add the items from the given sequence to the set"
},
{
"__init__"
,
(
PyCFunction
)
TreeSet_update
,
METH_VARARGS
,
"__init__(seq) -- Initialize with the items from the given sequence"
},
{
"remove"
,
(
PyCFunction
)
TreeSet_remove
,
METH_VARARGS
,
{
"remove"
,
(
PyCFunction
)
TreeSet_remove
,
METH_VARARGS
,
"remove(id) -- Remove a key from the set"
},
"remove(id) -- Remove a key from the set"
},
#ifdef PERSISTENT
#ifdef PERSISTENT
{
"_p_resolveConflict"
,
(
PyCFunction
)
BTree__p_resolveConflict
,
METH_VARARGS
,
"_p_resolveConflict() -- Reinitialize from a newly created copy"
},
{
"_p_deactivate"
,
(
PyCFunction
)
BTree__p_deactivate
,
METH_VARARGS
,
{
"_p_deactivate"
,
(
PyCFunction
)
BTree__p_deactivate
,
METH_VARARGS
,
"_p_deactivate() -- Reinitialize from a newly created copy"
},
"_p_deactivate() -- Reinitialize from a newly created copy"
},
#endif
#endif
...
...
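Since update() and __init__ are both wired to TreeSet_update above, a tree set can be filled straight from a Python sequence. A small usage sketch (IITreeSet is one of the generated instantiations of this template; output shown in a comment):

from BTrees.IIBTree import IITreeSet

s = IITreeSet([1, 5, 9])   # __init__ hands the sequence to update()
s.update([2, 3])           # returns the number of keys actually added
print list(s.keys())       # -> [1, 2, 3, 5, 9]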
trunk/src/BTrees/convert.py
0 → 100644
View file @
2a1ae51f
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# (Full ZPL 1.0 license text, identical to the header reproduced above.)
#
##############################################################################
def convert(old, new, threshold=200, f=None, None=None):
    "Utility for converting old btree to new"
    n=0
    for k, v in old.items():
        if f is not None: v=f(v)
        new[k]=v
        n=n+1
        if n > threshold:
            get_transaction().commit(1)
            old._p_jar.cacheMinimize(3)
            n=0

    get_transaction().commit(1)
    old._p_jar.cacheMinimize(3)
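A sketch of driving convert() (old_index and root are placeholders for an existing tree and the database root, get_transaction() is the same API convert() itself relies on, and the final full commit is assumed to be the caller's job since convert() only commits subtransactions):

from BTrees.OOBTree import OOBTree
from BTrees.convert import convert

new_index = OOBTree()
convert(old_index, new_index, threshold=200)   # copy items, committing a
                                               # subtransaction every 200
root['index'] = new_index                      # swap the converted tree in
get_transaction().commit()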
trunk/src/BTrees/intkeymacros.h
View file @
2a1ae51f
 #define KEY_TYPE int
-#define TEST_KEY(KEY, TARGET) ( (KEY) - (TARGET) )
+#define KEY_CHECK PyInt_Check
+#define TEST_KEY(K, T) (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0))
 #define DECREF_KEY(KEY)
 #define INCREF_KEY(k)
 #define COPY_KEY(KEY, E) (KEY=(E))
...
...
trunk/src/BTrees/intvaluemacros.h
View file @
2a1ae51f
 #define VALUE_TYPE int
-#define TEST_VALUE(VALUE, TARGET) ( (VALUE) - (TARGET) )
+#define TEST_VALUE(K, T) (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0))
+#define VALUE_SAME(VALUE, TARGET) ( (VALUE) == (TARGET) )
 #define DECLARE_VALUE(NAME) VALUE_TYPE NAME
 #define VALUE_PARSE "i"
 #define DECREF_VALUE(k)
...
...
trunk/src/BTrees/tests/__init__.py
0 → 100644
View file @
2a1ae51f
# If tests is a package, debugging is a bit easier.
trunk/src/BTrees/tests/testBTrees.py
View file @
2a1ae51f
...
@@ -122,10 +122,12 @@ class Base:
         os.system('rm fs_tmp__*')

     def testLoadAndStore(self):
-        t = self.t
+        for i in 0, 10, 1000:
+            t = self.t.__class__()
+            self._populate(t, i)
             try:
                 root = self._getRoot()
-            root['t'] = t
+                root[i] = t
                 get_transaction().commit()
             except:
                 self._closeDB(root)
@@ -138,18 +140,20 @@ class Base:
             root = self._getRoot()
             #XXX BTree stuff doesn't implement comparison
             if hasattr(t, 'items'):
-                assert list(root['t'].items()) == list(t.items())
+                assert list(root[i].items()) == list(t.items())
             else:
-                assert list(root['t'].keys()) == list(t.keys())
+                assert list(root[i].keys()) == list(t.keys())
         finally:
             self._closeDB(root)
             self._delDB()

     def testGhostUnghost(self):
-        t = self.t
+        for i in 0, 10, 1000:
+            t = self.t.__class__()
+            self._populate(t, i)
             try:
                 root = self._getRoot()
-            root['t'] = t
+                root[i] = t
                 get_transaction().commit()
             except:
                 self._closeDB(root)
@@ -160,18 +164,23 @@ class Base:
             try:
                 root = self._getRoot()
-            root['t']._p_changed = None
+                root[i]._p_changed = None
                 get_transaction().commit()
                 if hasattr(t, 'items'):
-                    assert list(root['t'].items()) == list(t.items())
+                    assert list(root[i].items()) == list(t.items())
                 else:
-                    assert list(root['t'].keys()) == list(t.keys())
+                    assert list(root[i].keys()) == list(t.keys())
             finally:
                 self._closeDB(root)
                 self._delDB()

 class MappingBase(Base):
     """ Tests common to mappings (buckets, btrees) """

+    def _populate(self, t, l):
+        # Make some data
+        for i in range(l): t[i]=i
+
     def testGetItemFails(self):
         self.assertRaises(KeyError, self._getitemfail)
@@ -271,8 +280,45 @@ class MappingBase(Base):
         diff = lsubtract(list(self.t.keys()), [])
         assert diff == [], diff

+    def testUpdate(self):
+        "mapping update"
+        d={}
+        l=[]
+        for i in range(10000):
+            k=whrandom.randint(-2000, 2000)
+            d[k]=i
+            l.append((k, i))
+
+        items=d.items()
+        items.sort()
+
+        self.t.update(d)
+        assert list(self.t.items()) == items
+
+        self.t.clear()
+        assert list(self.t.items()) == []
+
+        self.t.update(l)
+        assert list(self.t.items()) == items
+
+    def testEmptyRangeSearches(self):
+        t=self.t
+        t.update([(1,1), (5,5), (9,9)])
+        assert list(t.keys(-6,-4)) == [], list(t.keys(-6,-4))
+        assert list(t.keys(2,4)) == [], list(t.keys(2,4))
+        assert list(t.keys(6,8)) == [], list(t.keys(6,8))
+        assert list(t.keys(10,12)) == [], list(t.keys(10,12))
+
 class NormalSetTests(Base):
     """ Test common to all set types """

+    def _populate(self, t, l):
+        # Make some data
+        t.update(range(l))
+
     def testInsertReturnsValue(self):
         t = self.t
         assert t.insert(5) == 1
@@ -343,6 +389,29 @@ class NormalSetTests(Base):
         assert t.minKey(3) == 3
         assert t.minKey(9) == 10

+    def testUpdate(self):
+        "mapping update"
+        d={}
+        l=[]
+        for i in range(10000):
+            k=whrandom.randint(-2000, 2000)
+            d[k]=i
+            l.append(k)
+
+        items=d.keys()
+        items.sort()
+
+        self.t.update(l)
+        assert list(self.t.keys()) == items
+
+    def testEmptyRangeSearches(self):
+        t=self.t
+        t.update([1, 5, 9])
+        assert list(t.keys(-6,-4)) == [], list(t.keys(-6,-4))
+        assert list(t.keys(2,4)) == [], list(t.keys(2,4))
+        assert list(t.keys(6,8)) == [], list(t.keys(6,8))
+        assert list(t.keys(10,12)) == [], list(t.keys(10,12))
+
 class ExtendedSetTests(NormalSetTests):
     def testLen(self):
         t = self.t
...
...
trunk/src/BTrees/tests/testConflict.py
0 → 100644
View file @
2a1ae51f
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# (Full ZPL 1.0 license text, identical to the header reproduced above.)
#
##############################################################################
import sys, os, time, whrandom

try:
    sys.path.insert(0, '.')
    import ZODB
except:
    sys.path.insert(0, '../..')
    import ZODB

from BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet
from BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet
from BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet
from BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet

from unittest import TestCase, TestSuite, JUnitTextTestRunner, \
     VerboseTextTestRunner, makeSuite

TextTestRunner = VerboseTextTestRunner

class Base:
    """ Tests common to all types: sets, buckets, and BTrees """
    def tearDown(self):
        self.t = None
        del self.t

    def _getRoot(self):
        from ZODB.FileStorage import FileStorage
        from ZODB.DB import DB
        n = 'fs_tmp__%s' % os.getpid()
        s = FileStorage(n)
        db = DB(s)
        root = db.open().root()
        return root

    def _closeDB(self, root):
        root._p_jar._db.close()
        root = None

    def _delDB(self):
        os.system('rm fs_tmp__*')

class MappingBase(Base):
    """ Tests common to mappings (buckets, btrees) """

    def _deletefail(self):
        del self.t[1]

    def _setupConflict(self):
        base = self.t
        d = {}
        for i in range(20):
            d[whrandom.randint(-10000, 10000)] = i*100000
        e1 = {}
        while len(e1) < 5:
            k = whrandom.randint(-10000, 10000)
            if not d.has_key(k): e1[k] = len(e1)
        e1 = e1.items()

        e2 = {}
        while len(e2) < 5:
            k = whrandom.randint(-10000, 10000)
            if not d.has_key(k) and not e2.has_key(k): e2[k] = len(e2)
        e2 = e2.items()

        base.update(d)
        b1 = base.__class__(base)
        b2 = base.__class__(base)
        bm = base.__class__(base)

        items = base.items()

        return base, b1, b2, bm, e1, e2, items

    def testMergeDelete(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        del b1[items[0][0]]
        del b2[items[5][0]]
        del b1[items[-1][0]]
        del b2[items[-2][0]]
        del bm[items[0][0]]
        del bm[items[5][0]]
        del bm[items[-1][0]]
        del bm[items[-2][0]]
        test_merge(base, b1, b2, bm, 'merge delete')

    def testMergeDeleteAndUpdate(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        del b1[items[0][0]]
        b2[items[5][0]] = 1
        del b1[items[-1][0]]
        b2[items[-2][0]] = 2
        del bm[items[0][0]]
        bm[items[5][0]] = 1
        del bm[items[-1][0]]
        bm[items[-2][0]] = 2
        test_merge(base, b1, b2, bm, 'merge update and delete')

    def testMergeUpdate(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        b1[items[0][0]] = 1
        b2[items[5][0]] = 2
        b1[items[-1][0]] = 3
        b2[items[-2][0]] = 4
        bm[items[0][0]] = 1
        bm[items[5][0]] = 2
        bm[items[-1][0]] = 3
        bm[items[-2][0]] = 4
        test_merge(base, b1, b2, bm, 'merge update')

    def testFailMergeDelete(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        del b1[items[0][0]]
        del b2[items[0][0]]
        test_merge(base, b1, b2, bm, 'merge conflicting delete',
                   should_fail=1)

    def testFailMergeUpdate(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        b1[items[0][0]] = 1
        b2[items[0][0]] = 2
        test_merge(base, b1, b2, bm, 'merge conflicting update',
                   should_fail=1)

    def testFailMergeDeleteAndUpdate(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        del b1[items[0][0]]
        b2[items[0][0]] = -9
        test_merge(base, b1, b2, bm, 'merge conflicting update and delete',
                   should_fail=1)

    def testMergeInserts(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        b1[-99999] = -99999
        b1[e1[0][0]] = e1[0][1]
        b2[99999] = 99999
        b2[e1[2][0]] = e1[2][1]

        bm[-99999] = -99999
        bm[e1[0][0]] = e1[0][1]
        bm[99999] = 99999
        bm[e1[2][0]] = e1[2][1]
        test_merge(base, b1, b2, bm, 'merge insert')

    def testMergeInsertsFromEmpty(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        base.clear()
        b1.clear()
        b2.clear()
        bm.clear()

        b1.update(e1)
        bm.update(e1)
        b2.update(e2)
        bm.update(e2)

        test_merge(base, b1, b2, bm, 'merge insert from empty')

    def testMergeEmptyAndFill(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        b1.clear()
        bm.clear()
        b2.update(e2)
        bm.update(e2)

        test_merge(base, b1, b2, bm, 'merge insert from empty')

    def testMergeEmpty(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        b1.clear()
        bm.clear()

        test_merge(base, b1, b2, bm, 'empty one and not other')

    def testFailMergeInsert(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        b1[-99999] = -99999
        b1[e1[0][0]] = e1[0][1]
        b2[99999] = 99999
        b2[e1[0][0]] = e1[0][1]
        test_merge(base, b1, b2, bm, 'merge conflicting inserts',
                   should_fail=1)

class NormalSetTests(Base):
    """ Test common to all set types """

class ExtendedSetTests(NormalSetTests):
    "Set (as opposed to TreeSet) specific tests."

    def _setupConflict(self):
        base = self.t
        d = {}
        for i in range(20):
            d[whrandom.randint(-10000, 10000)] = i*100000
        e1 = {}
        while len(e1) < 5:
            k = whrandom.randint(-10000, 10000)
            if not d.has_key(k): e1[k] = len(e1)
        e1 = e1.keys()

        e2 = {}
        while len(e2) < 5:
            k = whrandom.randint(-10000, 10000)
            if not d.has_key(k) and not e2.has_key(k): e2[k] = len(e2)
        e2 = e2.keys()

        base.update(d.keys())
        b1 = base.__class__(base)
        b2 = base.__class__(base)
        bm = base.__class__(base)

        items = base.keys()

        return base, b1, b2, bm, e1, e2, items

    def testMergeDelete(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        b1.remove(items[0])
        b2.remove(items[5])
        b1.remove(items[-1])
        b2.remove(items[-2])
        bm.remove(items[0])
        bm.remove(items[5])
        bm.remove(items[-1])
        bm.remove(items[-2])
        test_merge(base, b1, b2, bm, 'merge delete')

    def testFailMergeDelete(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        b1.remove(items[0])
        b2.remove(items[0])
        test_merge(base, b1, b2, bm, 'merge conflicting delete',
                   should_fail=1)

    def testMergeInserts(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        b1.insert(-99999)
        b1.insert(e1[0])
        b2.insert(99999)
        b2.insert(e1[2])

        bm.insert(-99999)
        bm.insert(e1[0])
        bm.insert(99999)
        bm.insert(e1[2])
        test_merge(base, b1, b2, bm, 'merge insert')

    def testMergeInsertsFromEmpty(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        base.clear()
        b1.clear()
        b2.clear()
        bm.clear()

        b1.update(e1)
        bm.update(e1)
        b2.update(e2)
        bm.update(e2)

        test_merge(base, b1, b2, bm, 'merge insert from empty')

    def testMergeEmptyAndFill(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        b1.clear()
        bm.clear()
        b2.update(e2)
        bm.update(e2)

        test_merge(base, b1, b2, bm, 'merge insert from empty')

    def testMergeEmpty(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()

        b1.clear()
        bm.clear()

        test_merge(base, b1, b2, bm, 'empty one and not other')

    def testFailMergeInsert(self):
        base, b1, b2, bm, e1, e2, items = self._setupConflict()
        b1.insert(-99999)
        b1.insert(e1[0])
        b2.insert(99999)
        b2.insert(e1[0])
        test_merge(base, b1, b2, bm, 'merge conflicting inserts',
                   should_fail=1)

def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0):
    s1 = o1.__getstate__()
    s2 = o2.__getstate__()
    s3 = o3.__getstate__()
    expected = expect.__getstate__()
    if expected is None: expected = ((((),),),)

    if should_fail:
        try:
            merged = o1._p_resolveConflict(s1, s2, s3)
        except:
            pass # cool
        else:
            assert 0, message
    else:
        merged = o1._p_resolveConflict(s1, s2, s3)
        assert merged == expected, message

class BucketTests(MappingBase):
    """ Tests common to all buckets """

class BTreeTests(MappingBase):
    """ Tests common to all BTrees """

## BTree tests

class TestIOBTrees(BTreeTests, TestCase):
    def setUp(self): self.t = IOBTree()

class TestOOBTrees(BTreeTests, TestCase):
    def setUp(self): self.t = OOBTree()

class TestOIBTrees(BTreeTests, TestCase):
    def setUp(self): self.t = OIBTree()

class TestIIBTrees(BTreeTests, TestCase):
    def setUp(self): self.t = IIBTree()

## Set tests

class TestIOSets(ExtendedSetTests, TestCase):
    def setUp(self): self.t = IOSet()

class TestOOSets(ExtendedSetTests, TestCase):
    def setUp(self): self.t = OOSet()

class TestIISets(ExtendedSetTests, TestCase):
    def setUp(self): self.t = IISet()

class TestOISets(ExtendedSetTests, TestCase):
    def setUp(self): self.t = OISet()

class TestIOTreeSets(NormalSetTests, TestCase):
    def setUp(self): self.t = IOTreeSet()

class TestOOTreeSets(NormalSetTests, TestCase):
    def setUp(self): self.t = OOTreeSet()

class TestIITreeSets(NormalSetTests, TestCase):
    def setUp(self): self.t = IITreeSet()

class TestOITreeSets(NormalSetTests, TestCase):
    def setUp(self): self.t = OITreeSet()

## Bucket tests

class TestIOBuckets(BucketTests, TestCase):
    def setUp(self): self.t = IOBucket()

class TestOOBuckets(BucketTests, TestCase):
    def setUp(self): self.t = OOBucket()

class TestIIBuckets(BucketTests, TestCase):
    def setUp(self): self.t = IIBucket()

class TestOIBuckets(BucketTests, TestCase):
    def setUp(self): self.t = OIBucket()

def test_suite():
    TIOBTree = makeSuite(TestIOBTrees, 'test')
    TOOBTree = makeSuite(TestOOBTrees, 'test')
    TOIBTree = makeSuite(TestOIBTrees, 'test')
    TIIBTree = makeSuite(TestIIBTrees, 'test')

    TIOSet = makeSuite(TestIOSets, 'test')
    TOOSet = makeSuite(TestOOSets, 'test')
    TOISet = makeSuite(TestOISets, 'test')
    TIISet = makeSuite(TestIISets, 'test')

    TIOTreeSet = makeSuite(TestIOTreeSets, 'test')
    TOOTreeSet = makeSuite(TestOOTreeSets, 'test')
    TOITreeSet = makeSuite(TestOITreeSets, 'test')
    TIITreeSet = makeSuite(TestIITreeSets, 'test')

    TIOBucket = makeSuite(TestIOBuckets, 'test')
    TOOBucket = makeSuite(TestOOBuckets, 'test')
    TOIBucket = makeSuite(TestOIBuckets, 'test')
    TIIBucket = makeSuite(TestIIBuckets, 'test')

    alltests = TestSuite((TIOSet, TOOSet, TOISet, TIISet,
                          TIOTreeSet, TOOTreeSet, TOITreeSet, TIITreeSet,
                          TIOBucket, TOOBucket, TOIBucket, TIIBucket,
                          TOOBTree, TIOBTree, TOIBTree, TIIBTree))
    return alltests

def main():
    alltests = test_suite()
    runner = TextTestRunner()
    runner.run(alltests)

def debug():
    test_suite().debug()

def pdebug():
    import pdb
    pdb.run('debug()')

## utility functions

def lsubtract(l1, l2):
    l1 = list(l1)
    l2 = list(l2)
    l = filter(lambda x, l1=l1: x not in l1, l2)
    l = l + filter(lambda x, l2=l2: x not in l2, l1)
    return l

def realseq(itemsob):
    return map(lambda x: x, itemsob)

if __name__ == '__main__':
    if len(sys.argv) > 1:
        globals()[sys.argv[1]]()
    else:
        main()
View file @
2a1ae51f
...
@@ -84,9 +84,10 @@
...
@@ -84,9 +84,10 @@
##############################################################################
##############################################################################
"""Handy standard storage machinery
"""Handy standard storage machinery
"""
"""
__version__
=
'$Revision: 1.1
0
$'
[
11
:
-
2
]
__version__
=
'$Revision: 1.1
1
$'
[
11
:
-
2
]
import
time
,
bpthread
,
UndoLogCompatible
import
ThreadLock
,
bpthread
import
time
,
UndoLogCompatible
from
POSException
import
UndoError
from
POSException
import
UndoError
from
TimeStamp
import
TimeStamp
from
TimeStamp
import
TimeStamp
z64
=
'
\
0
'
*
8
z64
=
'
\
0
'
*
8
...
@@ -101,7 +102,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
...
@@ -101,7 +102,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
self
.
__name__
=
name
self
.
__name__
=
name
# Allocate locks:
# Allocate locks:
l
=
bpthread
.
allocate_lock
()
l
=
ThreadLock
.
allocate_lock
()
self
.
_lock_acquire
=
l
.
acquire
self
.
_lock_acquire
=
l
.
acquire
self
.
_lock_release
=
l
.
release
self
.
_lock_release
=
l
.
release
l
=
bpthread
.
allocate_lock
()
l
=
bpthread
.
allocate_lock
()
...
...
trunk/src/ZODB/ConflictResolution.py
0 → 100644
View file @
2a1ae51f
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# (Full ZPL 1.0 license text, identical to the header reproduced above.)
#
##############################################################################
from cStringIO import StringIO
from cPickle import Unpickler, Pickler
import sys
#import traceback

bad_classes = {}
bad_class = bad_classes.has_key

ResolvedSerial = 'rs'

def _classFactory(location, name,
                  _silly=('__doc__',), _globals={}):
    return getattr(__import__(location, _globals, _globals, _silly),
                   name)

def state(self, oid, serial, prfactory):
    p = self.loadSerial(oid, serial)
    file = StringIO(p)
    unpickler = Unpickler(file)
    unpickler.persistent_load = prfactory
    class_tuple = unpickler.load()
    state = unpickler.load()
    return state

class PersistentReference:
    def __repr__(self):
        return "PR(%s %s)" % (id(self), self.data)

    def __getstate__(self):
        raise "Can't pickle PersistentReference"

class PersistentReferenceFactory:
    data = None

    def __call__(self, oid, getattr=getattr, None=None):
        data = self.data
        if not data: data = self.data = {}
        r = data.get(oid, None)
        if r is None:
            r = PersistentReference()
            r.data = oid
            data[oid] = r
        return r

def persistent_id(object,
                  PersistentReference=PersistentReference,
                  getattr=getattr):
    if getattr(object, '__class__', 0) is not PersistentReference:
        return None
    return object.data

class ConflictResolvingStorage:
    "Mix-in class that provides conflict resolution handling for storages"

    def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle):
        #class_tuple, old, committed, newstate = ('',''), 0, 0, 0
        try:
            file = StringIO(newpickle)
            unpickler = Unpickler(file)
            prfactory = PersistentReferenceFactory()
            unpickler.persistent_load = prfactory
            class_tuple = unpickler.load()[0]
            if bad_class(class_tuple):
                #sys.stderr.write(' b%s ' % class_tuple[1]); sys.stderr.flush()
                return 0
            newstate = unpickler.load()
            klass = _classFactory(class_tuple[0], class_tuple[1])
            klass._p_resolveConflict
            inst = klass.__basicnew__()

            try:
                resolve = inst._p_resolveConflict
            except AttributeError:
                bad_classes[class_tuple] = 1
                #traceback.print_exc()
                #sys.stderr.write(' b%s ' % class_tuple[1]); sys.stderr.flush()
                return 0

            old = state(self, oid, oldSerial, prfactory)
            committed = state(self, oid, committedSerial, prfactory)

            resolved = resolve(old, committed, newstate)

            file = StringIO()
            pickler = Pickler(file, 1)
            pickler.persistent_id = persistent_id
            pickler.dump(class_tuple)
            pickler.dump(resolved)
            #sys.stderr.write(' r%s ' % class_tuple[1]); sys.stderr.flush()
            return file.getvalue(1)
        except Exception, v:
            #print '='*70
            #print v, v.args
            #print '='*70
            #print old
            #print '='*70
            #print committed
            #print '='*70
            #print newstate
            #print '='*70
            #traceback.print_exc()
            #sys.stderr.write(' c%s ' % class_tuple[1]); sys.stderr.flush()
            return 0
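How a storage might plug this mix-in in is sketched below. The store() flow and the _current_serial() and _write() helpers are assumptions for illustration, not part of this commit; the real wiring lives in the concrete storage implementations. Returning ResolvedSerial is what lets Connection mark the object as resolved rather than raising ConflictError.

# Hypothetical sketch -- not part of this commit.
import BaseStorage
from ConflictResolution import ConflictResolvingStorage, ResolvedSerial
from POSException import ConflictError

class MyStorage(BaseStorage.BaseStorage, ConflictResolvingStorage):

    def store(self, oid, serial, data, version, transaction):
        old_serial = self._current_serial(oid)        # assumed helper
        if serial != old_serial:
            # Someone committed first: ask the object to merge the states.
            resolved = self.tryToResolveConflict(oid, old_serial,
                                                 serial, data)
            if not resolved:
                raise ConflictError, `oid`
            data = resolved
            serial = ResolvedSerial  # signal the resolution to Connection
        self._write(oid, data, transaction)           # assumed helper
        return serial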
trunk/src/ZODB/Connection.py
View file @
2a1ae51f
...
@@ -84,8 +84,8 @@
...
@@ -84,8 +84,8 @@
##############################################################################
##############################################################################
"""Database connection support
"""Database connection support
$Id: Connection.py,v 1.4
5 2001/02/09 14:11:49
jim Exp $"""
$Id: Connection.py,v 1.4
6 2001/03/15 13:16:26
jim Exp $"""
__version__
=
'$Revision: 1.4
5
$'
[
11
:
-
2
]
__version__
=
'$Revision: 1.4
6
$'
[
11
:
-
2
]
from
cPickleCache
import
PickleCache
from
cPickleCache
import
PickleCache
from
POSException
import
ConflictError
,
ExportError
from
POSException
import
ConflictError
,
ExportError
...
@@ -94,8 +94,9 @@ from cPickle import Unpickler, Pickler
...
@@ -94,8 +94,9 @@ from cPickle import Unpickler, Pickler
from
ExtensionClass
import
Base
from
ExtensionClass
import
Base
from
time
import
time
from
time
import
time
import
Transaction
,
string
,
ExportImport
,
sys
,
traceback
,
TmpStore
import
Transaction
,
string
,
ExportImport
,
sys
,
traceback
,
TmpStore
from
zLOG
import
LOG
,
ERROR
from
zLOG
import
LOG
,
ERROR
,
BLATHER
from
coptimizations
import
new_persistent_id
from
coptimizations
import
new_persistent_id
from
ConflictResolution
import
ResolvedSerial
ExtensionKlass
=
Base
.
__class__
ExtensionKlass
=
Base
.
__class__
...
@@ -230,6 +231,9 @@ class Connection(ExportImport.ExportImport):
        This just deactivates the thing.
        """
        if object is self:
            self._cache.invalidate(self._invalidated)
        else:
            self._cache.invalidate(object._p_oid)
        self._cache.invalidate(object._p_oid)

    def cacheFullSweep(self, dt=0): self._cache.full_sweep(dt)
...
@@ -257,6 +261,8 @@ class Connection(ExportImport.ExportImport):
            db._closeConnection(self)

    def commit(self, object, transaction, _type=type, _st=type('')):
        if object is self: return # we registered ourself
        oid = object._p_oid
        invalid = self._invalid
        if oid is None or object._p_jar is not self:
...
@@ -267,7 +273,12 @@ class Connection(ExportImport.ExportImport):
            self._creating.append(oid)
        elif object._p_changed:
            if invalid(oid) or invalid(None): raise ConflictError, `oid`
            if (
                (invalid(oid) and
                 not hasattr(object, '_p_resolveConflict'))
                or
                invalid(None)
                ):
                raise ConflictError, `oid`
            self._invalidating.append(oid)
        else:
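The widened test above raises ConflictError for a write to an invalidated oid only when the object cannot resolve conflicts itself; an invalidation of None (everything) still always fails. The same decision, restated as a standalone predicate for clarity (the function name is made up):

def must_raise_write_conflict(invalid, oid, object):
    # Hypothetical restatement of the commit()-time check above.
    if invalid(None):
        # Everything is invalidated (e.g. after an undo): always a conflict.
        return 1
    if invalid(oid):
        # Only objects offering _p_resolveConflict may go on and let the
        # storage try to merge the competing states.
        return not hasattr(object, '_p_resolveConflict')
    return 0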
...
@@ -328,7 +339,13 @@ class Connection(ExportImport.ExportImport):
            self._creating.append(oid)
        else:
            #XXX We should never get here
            if invalid(oid) or invalid(None): raise ConflictError, `oid`
            if (
                (invalid(oid) and
                 not hasattr(object, '_p_resolveConflict'))
                or
                invalid(None)
                ):
                raise ConflictError, `oid`
            self._invalidating.append(oid)

        klass = object.__class__
...
@@ -362,6 +379,10 @@ class Connection(ExportImport.ExportImport):
        # Note that if s is false, then the storage defered the return
        if _type(s) is _st:
            # normal case
            if s is ResolvedSerial:
                # resolved conflict
                object._p_changed = None
            else:
                object._p_serial = s
            object._p_serial = s
            object._p_changed = 0
        else:
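ResolvedSerial is the marker returned by a storage whose store() merged the competing states itself. Setting _p_changed to None turns the in-memory copy into a ghost, so the merged state is re-read from the storage on next access; an ordinary serial string just records a successful store. A small restatement of the two outcomes (the helper name is made up):

from ConflictResolution import ResolvedSerial

def note_store_result(object, s):
    # Hypothetical restatement of the per-object bookkeeping above.
    if s is ResolvedSerial:
        # The storage resolved a conflict: drop our copy of the state so
        # the merged state is loaded lazily from the storage.
        object._p_changed = None
    else:
        # Normal case: remember the new serial and mark the object clean.
        object._p_serial = s
        object._p_changed = 0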
...
@@ -389,6 +410,10 @@ class Connection(ExportImport.ExportImport):
        tmp = self._tmp
        if tmp is _None: return
        src = self._storage
        LOG('ZODB', BLATHER, 'Commiting subtransaction of size %s' % src.getSize())
        self._storage = tmp
        self._tmp = _None
...
@@ -487,7 +512,13 @@ class Connection(ExportImport.ExportImport):
            # notifications between the time we check and the time we
            # read.
            invalid = self._invalid
            if invalid(oid) or invalid(None): raise ConflictError, `oid`
            if invalid(oid) or invalid(None):
                if not hasattr(object.__class__, '_p_independent'):
                    get_transaction().register(self)
                    raise ConflictError(`oid`, `object.__class__`)
                invalid = 1
            else:
                invalid = 0

            file = StringIO(p)
            unpickler = Unpickler(file)
...
@@ -503,6 +534,14 @@ class Connection(ExportImport.ExportImport):
            object._p_serial = serial

            if invalid:
                if object._p_independent():
                    try: del self._invalidated[oid]
                    except KeyError: pass
                else:
                    get_transaction().register(self)
                    raise ConflictError(`oid`, `object.__class__`)
        except:
            t, v = sys.exc_info()[:2]
            LOG('ZODB', ERROR, "Couldn't load state for %s" % `oid`,
...
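The two hunks above give _p_independent() a say when an invalidated object is loaded: a class that declares its state independent of all other objects may simply drop the pending invalidation, while any other class makes the connection register itself and raise a read-time ConflictError. A minimal sketch of a class that opts in (illustrative only; the Persistence import path is an assumption):

import Persistence

class HitCounter(Persistence.Persistent):
    """Hypothetical object whose state never depends on other objects,
    so reading a slightly stale copy is acceptable."""

    def __init__(self):
        self.hits = 0

    def bump(self):
        self.hits = self.hits + 1

    def _p_independent(self):
        # Returning true tells the connection's state-loading code above
        # that this object may be loaded even while an invalidation for
        # it is pending.
        return 1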
trunk/src/ZODB/FileStorage.py
View file @
2a1ae51f
...
@@ -184,7 +184,7 @@
# may have a back pointer to a version record or to a non-version
# record.
#
__version__='$Revision: 1.50 $'[11:-2]
__version__='$Revision: 1.51 $'[11:-2]

import struct, time, os, bpthread, string, base64, sys
from struct import pack, unpack
...
@@ -197,6 +197,7 @@ from zLOG import LOG, WARNING, ERROR, PANIC, register_subsystem
register_subsystem('ZODB FS')
import BaseStorage
from cPickle import Pickler, Unpickler
import ConflictResolution
try: from posix import fsync
except: fsync = None
...
@@ -240,7 +241,8 @@ class FileStorageQuotaError(FileStorageError,
packed_version='FS21'

class FileStorage(BaseStorage.BaseStorage):
class FileStorage(BaseStorage.BaseStorage,
                  ConflictResolution.ConflictResolvingStorage):
    _packt = z64

    def __init__(self, file_name, create=0, read_only=0, stop=None,
...
@@ -663,18 +665,23 @@ class FileStorage(BaseStorage.BaseStorage):
                    raise POSException.VersionLockError, (
                        `oid`, locked_version)

            if serial != oserial:
                raise POSException.ConflictError, (
                    serial, oserial)
            if serial != oserial:
                data = self.tryToResolveConflict(oid, oserial, serial, data)
                if not data:
                    raise POSException.ConflictError, (
                        serial, oserial)
                else:
                    oserial = serial

            tfile = self._tfile
            write = tfile.write
            pos = self._pos
            here = pos + (tfile.tell() + self._thl)
            self._tappend((oid, here))
            serial = self._serial
            newserial = self._serial
            write(pack(">8s8s8s8sH8s",
                       oid, serial, p64(old), p64(pos),
                       oid, newserial, p64(old), p64(pos),
                       len(version), p64(len(data))
                       )
                  )
            if version:
...
@@ -695,7 +702,8 @@ class FileStorage(BaseStorage.BaseStorage):
                raise FileStorageQuotaError, (
                    'The storage quota has been exceeded.')

            return serial
            return (serial == oserial and newserial
                    or ConflictResolution.ResolvedSerial)
        finally:
            self._lock_release()
...
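The new return expression relies on the Python and/or idiom: when the incoming serial matched (no conflict) the freshly allocated newserial is returned, otherwise ResolvedSerial tells the connection that the data it sent was rewritten by conflict resolution. The idiom is safe here because an 8-byte serial string is never false. Spelled out with an explicit if/else (a sketch; the function name is made up):

import ConflictResolution

def store_return_value(serial, oserial, newserial):
    # Hypothetical restatement of the return expression above.
    if serial == oserial:
        return newserial                          # ordinary successful store
    return ConflictResolution.ResolvedSerial      # a conflict was resolved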