Commit af0bfe6d authored by Leif Walsh, committed by Yoni Fogel

[t:3977] added tests to test3884 and fixed the bug where brtleaf_get_split_loc could pick a split point past the last leafentry

git-svn-id: file:///svn/toku/tokudb@35065 c7de825b-a66e-492c-adef-691d508d4ae1
parent 90d968d0
@@ -1311,11 +1311,24 @@ brtleaf_get_split_loc(
             curr_le = v;
             assert_zero(r);
             size_so_far += leafentry_disksize(curr_le);
-            if (size_so_far >= sumlesizes/2 ||
-                (i == node->n_children - 1 &&
-                 j == n_leafentries - 2)) {
+            if (size_so_far >= sumlesizes/2) {
                 *bn_index = i;
                 *le_index = j;
+                if ((*bn_index == node->n_children - 1) &&
+                    ((unsigned int) *le_index == n_leafentries - 1)) {
+                    // need to correct for when we're splitting after the
+                    // last element, that makes no sense
+                    if (*le_index > 0) {
+                        (*le_index)--;
+                    } else if (*bn_index > 0) {
+                        (*bn_index)--;
+                        *le_index = toku_omt_size(BLB_BUFFER(node, *bn_index)) - 1;
+                    } else {
+                        // we are trying to split a leaf with only one
+                        // leafentry in it
+                        assert(FALSE);
+                    }
+                }
                 goto exit;
             }
         }
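In other words: when the size-based split location lands on the very last leafentry of the node, splitting there would leave the right node empty, so the new branch backs the split point up by one leafentry (or into the previous basement node), and asserts if the leaf holds only a single leafentry. Below is a minimal standalone sketch of that correction with hypothetical names (bn_sizes stands in for toku_omt_size(BLB_BUFFER(node, ...)); this is not the TokuDB API, just an illustration of the logic added above):

#include <assert.h>
#include <stdio.h>

/* Sketch of the split-location correction: if the chosen location is the
 * last leafentry of the last basement node, step back one leafentry, or
 * back into the previous basement node if the last one holds a single
 * leafentry.  A leaf with only one leafentry cannot be split at all. */
static void
adjust_split_loc(int *bn_index, int *le_index,
                 int n_children, const int *bn_sizes)
{
    if (*bn_index == n_children - 1 && *le_index == bn_sizes[*bn_index] - 1) {
        if (*le_index > 0) {
            (*le_index)--;
        } else if (*bn_index > 0) {
            (*bn_index)--;
            *le_index = bn_sizes[*bn_index] - 1;
        } else {
            assert(0);  // single-leafentry leaf: nothing to split
        }
    }
}

int main(void) {
    // three basement nodes holding 4, 4 and 1 leafentries
    const int sizes[] = {4, 4, 1};
    int bn = 2, le = 0;  // size-based split point landed on the last leafentry
    adjust_split_loc(&bn, &le, 3, sizes);
    printf("split after bn=%d le=%d\n", bn, le);  // backs up to bn=1 le=3
    return 0;
}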
@@ -123,6 +123,190 @@ test_split_on_boundary(void)
    toku_free(sn.childkeys);
}
static void
test_split_with_everything_on_the_left(void)
{
const int nodesize = 1024, eltsize = 64, bnsize = 256;
const int keylen = sizeof(long), vallen = eltsize - keylen - (sizeof(((LEAFENTRY)NULL)->type) // overhead from LE_CLEAN_MEMSIZE
+sizeof(((LEAFENTRY)NULL)->keylen)
+sizeof(((LEAFENTRY)NULL)->u.clean.vallen));
const int eltsperbn = bnsize / eltsize;
struct brtnode sn;
int fd = open(__FILE__ ".brt", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
int r;
sn.max_msn_applied_to_node_on_disk.msn = 0;
sn.nodesize = nodesize;
sn.flags = 0x11223344;
sn.thisnodename.b = 20;
sn.layout_version = BRT_LAYOUT_VERSION;
sn.layout_version_original = BRT_LAYOUT_VERSION;
sn.height = 0;
const int nelts = 2 * nodesize / eltsize;
sn.n_children = nelts * eltsize / bnsize + 1;
sn.dirty = 1;
LEAFENTRY elts[nelts];
MALLOC_N(sn.n_children, sn.bp);
MALLOC_N(sn.n_children - 1, sn.childkeys);
sn.totalchildkeylens = 0;
LEAFENTRY big_element;
char *big_val;
for (int bn = 0; bn < sn.n_children; ++bn) {
BP_SUBTREE_EST(&sn,bn).ndata = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,bn).nkeys = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,bn).dsize = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,bn).exact = (BOOL)(random()%2 != 0);
BP_STATE(&sn,bn) = PT_AVAIL;
set_BLB(&sn, bn, toku_create_empty_bn());
BLB_NBYTESINBUF(&sn,bn) = 0;
BLB_OPTIMIZEDFORUPGRADE(&sn, bn) = BRT_LAYOUT_VERSION;
long k;
if (bn < sn.n_children - 1) {
for (int i = 0; i < eltsperbn; ++i) {
k = bn * eltsperbn + i;
char val[vallen];
memset(val, k, sizeof val);
elts[k] = le_fastmalloc((char *) &k, keylen, val, vallen);
r = toku_omt_insert(BLB_BUFFER(&sn, bn), elts[k], omt_long_cmp, elts[k], NULL); assert(r == 0);
BLB_NBYTESINBUF(&sn, bn) += OMT_ITEM_OVERHEAD + leafentry_disksize(elts[k]);
}
sn.childkeys[bn] = kv_pair_malloc(&k, sizeof k, 0, 0);
sn.totalchildkeylens += (sizeof k);
} else {
k = bn * eltsperbn;
big_val = toku_xmalloc(nelts * eltsize - 1);
memset(big_val, k, nelts * eltsize - 1);
big_element = le_fastmalloc((char *) &k, keylen, big_val, nelts * eltsize - 1);
r = toku_omt_insert(BLB_BUFFER(&sn, bn), big_element, omt_long_cmp, big_element, NULL); assert(r == 0);
BLB_NBYTESINBUF(&sn, bn) += OMT_ITEM_OVERHEAD + leafentry_disksize(big_element);
}
}
unlink(fname);
CACHETABLE ct;
BRT brt;
r = toku_brt_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_open_brt(fname, 1, &brt, nodesize, bnsize, ct, null_txn, toku_builtin_compare_fun, null_db); assert(r==0);
BRTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt, &sn, &nodea, &nodeb, &splitk, TRUE);
toku_unpin_brtnode(brt, nodeb);
r = toku_close_brt(brt, NULL); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0);
if (splitk.data) {
toku_free(splitk.data);
}
for (int i = 0; i < sn.n_children - 1; ++i) {
kv_pair_free(sn.childkeys[i]);
}
for (int i = 0; i < sn.n_children; ++i) {
toku_omt_free_items(BLB_BUFFER(&sn, i));
destroy_basement_node(BLB(&sn, i));
}
toku_free(sn.bp);
toku_free(sn.childkeys);
toku_free(big_val);
}
static void
test_split_on_boundary_of_last_node(void)
{
const int nodesize = 1024, eltsize = 64, bnsize = 256;
const int keylen = sizeof(long), vallen = eltsize - keylen - (sizeof(((LEAFENTRY)NULL)->type) // overhead from LE_CLEAN_MEMSIZE
+sizeof(((LEAFENTRY)NULL)->keylen)
+sizeof(((LEAFENTRY)NULL)->u.clean.vallen));
const int eltsperbn = bnsize / eltsize;
struct brtnode sn;
int fd = open(__FILE__ ".brt", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
int r;
sn.max_msn_applied_to_node_on_disk.msn = 0;
sn.nodesize = nodesize;
sn.flags = 0x11223344;
sn.thisnodename.b = 20;
sn.layout_version = BRT_LAYOUT_VERSION;
sn.layout_version_original = BRT_LAYOUT_VERSION;
sn.height = 0;
const int nelts = 2 * nodesize / eltsize;
sn.n_children = nelts * eltsize / bnsize + 1;
sn.dirty = 1;
LEAFENTRY elts[nelts];
MALLOC_N(sn.n_children, sn.bp);
MALLOC_N(sn.n_children - 1, sn.childkeys);
sn.totalchildkeylens = 0;
LEAFENTRY big_element;
char *big_val;
for (int bn = 0; bn < sn.n_children; ++bn) {
BP_SUBTREE_EST(&sn,bn).ndata = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,bn).nkeys = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,bn).dsize = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,bn).exact = (BOOL)(random()%2 != 0);
BP_STATE(&sn,bn) = PT_AVAIL;
set_BLB(&sn, bn, toku_create_empty_bn());
BLB_NBYTESINBUF(&sn,bn) = 0;
BLB_OPTIMIZEDFORUPGRADE(&sn, bn) = BRT_LAYOUT_VERSION;
long k;
if (bn < sn.n_children - 1) {
for (int i = 0; i < eltsperbn; ++i) {
k = bn * eltsperbn + i;
char val[vallen];
memset(val, k, sizeof val);
elts[k] = le_fastmalloc((char *) &k, keylen, val, vallen);
r = toku_omt_insert(BLB_BUFFER(&sn, bn), elts[k], omt_long_cmp, elts[k], NULL); assert(r == 0);
BLB_NBYTESINBUF(&sn, bn) += OMT_ITEM_OVERHEAD + leafentry_disksize(elts[k]);
}
sn.childkeys[bn] = kv_pair_malloc(&k, sizeof k, 0, 0);
sn.totalchildkeylens += (sizeof k);
} else {
k = bn * eltsperbn;
big_val = toku_xmalloc(nelts * eltsize - 100);
memset(big_val, k, nelts * eltsize - 100);
big_element = le_fastmalloc((char *) &k, keylen, big_val, nelts * eltsize - 100);
r = toku_omt_insert(BLB_BUFFER(&sn, bn), big_element, omt_long_cmp, big_element, NULL); assert(r == 0);
BLB_NBYTESINBUF(&sn, bn) += OMT_ITEM_OVERHEAD + leafentry_disksize(big_element);
}
}
unlink(fname);
CACHETABLE ct;
BRT brt;
r = toku_brt_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_open_brt(fname, 1, &brt, nodesize, bnsize, ct, null_txn, toku_builtin_compare_fun, null_db); assert(r==0);
BRTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt, &sn, &nodea, &nodeb, &splitk, TRUE);
toku_unpin_brtnode(brt, nodeb);
r = toku_close_brt(brt, NULL); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0);
if (splitk.data) {
toku_free(splitk.data);
}
for (int i = 0; i < sn.n_children - 1; ++i) {
kv_pair_free(sn.childkeys[i]);
}
for (int i = 0; i < sn.n_children; ++i) {
toku_omt_free_items(BLB_BUFFER(&sn, i));
destroy_basement_node(BLB(&sn, i));
}
toku_free(sn.bp);
toku_free(sn.childkeys);
toku_free(big_val);
}
static void
test_split_at_begin(void)
{
@@ -316,6 +500,8 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__)))
    toku_memory_check = 1;
    test_split_on_boundary();
test_split_with_everything_on_the_left();
test_split_on_boundary_of_last_node();
    test_split_at_begin();
    test_split_at_end();
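For a rough sense of why test_split_with_everything_on_the_left exercises the new correction path: assuming each small leafentry occupies roughly eltsize bytes on disk and ignoring the exact per-entry overhead (a back-of-the-envelope estimate, not the real leafentry_disksize arithmetic), the byte midpoint of the node falls inside the single huge leafentry in the last basement node, so the split location computed by size alone is the last leafentry of the node:

#include <stdio.h>

/* Back-of-the-envelope numbers for test_split_with_everything_on_the_left,
 * assuming a small leafentry's disksize is roughly eltsize bytes and the big
 * one is roughly keylen + vallen bytes (ignoring header overhead). */
int main(void) {
    const int nodesize = 1024, eltsize = 64, bnsize = 256, keylen = 8;
    const int nelts = 2 * nodesize / eltsize;             // 32 small leafentries
    const int n_children = nelts * eltsize / bnsize + 1;  // 9 basement nodes
    const int small_total = nelts * eltsize;              // ~2048 bytes in bns 0..7
    const int big = (nelts * eltsize - 1) + keylen;       // ~2055 bytes in bn 8
    const int sumlesizes = small_total + big;

    printf("n_children=%d small_total=%d big=%d half=%d\n",
           n_children, small_total, big, sumlesizes / 2);
    // half (~2051) exceeds small_total (~2048), so size_so_far only crosses
    // sumlesizes/2 at the single big leafentry -- the last leafentry of the
    // last basement node -- which is exactly the case the fix corrects.
    return 0;
}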