Commit 6b0c108c authored by Yoni Fogel's avatar Yoni Fogel

Refs Tokutek/ft-index#46 Killed dmt_functor<> template. Added dmtwriter_t...

Refs Tokutek/ft-index#46 Killed dmt_functor<> template.  Added dmtwriter_t template parameter to dmt and some documentation
parent eef0ab78
This diff is collapsed.
This diff is collapsed.
......@@ -97,28 +97,6 @@ PATENT RIGHTS GRANT:
#include <util/mempool.h>
#include "dmt-wrapper.h"
namespace toku {
template<>
class dmt_functor<DMTVALUE> {
public:
size_t get_dmtdatain_t_size(void) const {
return sizeof(DMTVALUE);
}
void write_dmtdata_t_to(DMTVALUE *const dest) const {
*dest = value;
}
dmt_functor(DMTVALUE _value)
: value(_value) {}
dmt_functor(const uint32_t size UU(), DMTVALUE *const src)
: value(*src) {
paranoid_invariant(size == sizeof(DMTVALUE));
}
private:
const DMTVALUE value;
};
}
int
toku_dmt_create_steal_sorted_array(DMT *dmtp, DMTVALUE **valuesp, uint32_t numvalues, uint32_t capacity) {
//TODO: implement using create_steal_sorted_array when it exists
......@@ -167,7 +145,7 @@ int toku_dmt_create_from_sorted_array(DMT *dmtp, DMTVALUE *values, uint32_t numv
}
int toku_dmt_insert_at(DMT dmt, DMTVALUE value, uint32_t index) {
toku::dmt_functor<DMTVALUE> functor(value);
dmt_wrapper_internal::dmtvalue_writer functor(value);
return dmt->insert_at(functor, index);
}
......@@ -222,7 +200,7 @@ int call_heftor(const uint32_t size, const DMTVALUE &v, const heftor &htor) {
int toku_dmt_insert(DMT dmt, DMTVALUE value, int(*h)(DMTVALUE, void*v), void *v, uint32_t *index) {
struct heftor htor = { .h = h, .v = v };
toku::dmt_functor<DMTVALUE> functor(value);
dmt_wrapper_internal::dmtvalue_writer functor(value);
return dmt->insert<heftor, call_heftor>(functor, htor, index);
}
......
......@@ -143,8 +143,32 @@ PATENT RIGHTS GRANT:
//typedef struct value *DMTVALUE; // A slight improvement over using void*.
#include <util/dmt.h>
typedef void *DMTVALUE;
typedef toku::dmt<DMTVALUE> *DMT;
namespace dmt_wrapper_internal {
class dmtvalue_writer {
public:
size_t get_size(void) const {
return sizeof(DMTVALUE);
}
void write_to(DMTVALUE *const dest) const {
*dest = value;
}
dmtvalue_writer(DMTVALUE _value)
: value(_value) {}
dmtvalue_writer(const uint32_t size UU(), DMTVALUE *const src)
: value(*src) {
paranoid_invariant(size == sizeof(DMTVALUE));
}
private:
const DMTVALUE value;
};
};
typedef toku::dmt<DMTVALUE, DMTVALUE, dmt_wrapper_internal::dmtvalue_writer> *DMT;
int toku_dmt_create (DMT *dmtp);
......
......@@ -689,16 +689,16 @@ ftleaf_get_split_loc(
switch (split_mode) {
case SPLIT_LEFT_HEAVY: {
*num_left_bns = node->n_children;
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->omt_size();
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->dmt_size();
if (*num_left_les == 0) {
*num_left_bns = node->n_children - 1;
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->omt_size();
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->dmt_size();
}
goto exit;
}
case SPLIT_RIGHT_HEAVY: {
*num_left_bns = 1;
*num_left_les = BLB_DATA(node, 0)->omt_size() ? 1 : 0;
*num_left_les = BLB_DATA(node, 0)->dmt_size() ? 1 : 0;
goto exit;
}
case SPLIT_EVENLY: {
......@@ -707,8 +707,8 @@ ftleaf_get_split_loc(
uint64_t sumlesizes = ftleaf_disk_size(node);
uint32_t size_so_far = 0;
for (int i = 0; i < node->n_children; i++) {
BN_DATA bd = BLB_DATA(node, i);
uint32_t n_leafentries = bd->omt_size();
bn_data* bd = BLB_DATA(node, i);
uint32_t n_leafentries = bd->dmt_size();
for (uint32_t j=0; j < n_leafentries; j++) {
size_t size_this_le;
int rr = bd->fetch_klpair_disksize(j, &size_this_le);
......@@ -725,7 +725,7 @@ ftleaf_get_split_loc(
(*num_left_les)--;
} else if (*num_left_bns > 1) {
(*num_left_bns)--;
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->omt_size();
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->dmt_size();
} else {
// we are trying to split a leaf with only one
// leafentry in it
......@@ -851,7 +851,7 @@ ftleaf_split(
ftleaf_get_split_loc(node, split_mode, &num_left_bns, &num_left_les);
{
// did we split right on the boundary between basement nodes?
const bool split_on_boundary = (num_left_les == 0) || (num_left_les == (int) BLB_DATA(node, num_left_bns - 1)->omt_size());
const bool split_on_boundary = (num_left_les == 0) || (num_left_les == (int) BLB_DATA(node, num_left_bns - 1)->dmt_size());
// Now we know where we are going to break it
// the two nodes will have a total of n_children+1 basement nodes
// and n_children-1 pivots
......@@ -912,7 +912,7 @@ ftleaf_split(
move_leafentries(BLB(B, curr_dest_bn_index),
BLB(node, curr_src_bn_index),
num_left_les, // first row to be moved to B
BLB_DATA(node, curr_src_bn_index)->omt_size() // number of rows in basement to be split
BLB_DATA(node, curr_src_bn_index)->dmt_size() // number of rows in basement to be split
);
BLB_MAX_MSN_APPLIED(B, curr_dest_bn_index) = BLB_MAX_MSN_APPLIED(node, curr_src_bn_index);
curr_dest_bn_index++;
......@@ -954,10 +954,10 @@ ftleaf_split(
toku_destroy_dbt(&node->childkeys[num_left_bns - 1]);
}
} else if (splitk) {
BN_DATA bd = BLB_DATA(node, num_left_bns - 1);
bn_data* bd = BLB_DATA(node, num_left_bns - 1);
uint32_t keylen;
void *key;
int rr = bd->fetch_le_key_and_len(bd->omt_size() - 1, &keylen, &key);
int rr = bd->fetch_key_and_len(bd->dmt_size() - 1, &keylen, &key);
invariant_zero(rr);
toku_memdup_dbt(splitk, key, keylen);
}
......@@ -1168,11 +1168,11 @@ merge_leaf_nodes(FTNODE a, FTNODE b)
a->dirty = 1;
b->dirty = 1;
BN_DATA a_last_bd = BLB_DATA(a, a->n_children-1);
bn_data* a_last_bd = BLB_DATA(a, a->n_children-1);
// this bool states if the last basement node in a has any items or not
// If it does, then it stays in the merge. If it does not, the last basement node
// of a gets eliminated because we do not have a pivot to store for it (because it has no elements)
const bool a_has_tail = a_last_bd->omt_size() > 0;
const bool a_has_tail = a_last_bd->dmt_size() > 0;
// move each basement node from b to a
// move the pivots, adding one of what used to be max(a)
......@@ -1199,7 +1199,7 @@ merge_leaf_nodes(FTNODE a, FTNODE b)
if (a_has_tail) {
uint32_t keylen;
void *key;
int rr = a_last_bd->fetch_le_key_and_len(a_last_bd->omt_size() - 1, &keylen, &key);
int rr = a_last_bd->fetch_key_and_len(a_last_bd->dmt_size() - 1, &keylen, &key);
invariant_zero(rr);
toku_memdup_dbt(&a->childkeys[a->n_children-1], key, keylen);
a->totalchildkeylens += keylen;
......
......@@ -419,7 +419,7 @@ get_leaf_num_entries(FTNODE node) {
int i;
toku_assert_entire_node_in_memory(node);
for ( i = 0; i < node->n_children; i++) {
result += BLB_DATA(node, i)->omt_size();
result += BLB_DATA(node, i)->dmt_size();
}
return result;
}
......@@ -1720,7 +1720,7 @@ toku_ft_bn_apply_cmd_once (
oldsize = leafentry_memsize(le) + key_storage_size;
}
// toku_le_apply_msg() may call mempool_malloc_from_omt() to allocate more space.
// toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt() to allocate more space.
// That means le is guaranteed to not cause a sigsegv but it may point to a mempool that is
// no longer in use. We'll have to release the old mempool later.
toku_le_apply_msg(
......@@ -1910,7 +1910,7 @@ toku_ft_bn_apply_cmd (
void* key = NULL;
uint32_t keylen = 0;
uint32_t omt_size;
uint32_t dmt_size;
int r;
struct cmd_leafval_heaviside_extra be = {compare_fun, desc, cmd->u.id.key};
......@@ -1922,9 +1922,9 @@ toku_ft_bn_apply_cmd (
case FT_INSERT: {
uint32_t idx;
if (doing_seqinsert) {
idx = bn->data_buffer.omt_size();
idx = bn->data_buffer.dmt_size();
DBT kdbt;
r = bn->data_buffer.fetch_le_key_and_len(idx-1, &kdbt.size, &kdbt.data);
r = bn->data_buffer.fetch_key_and_len(idx-1, &kdbt.size, &kdbt.data);
if (r != 0) goto fz;
int cmp = toku_cmd_leafval_heaviside(kdbt, be);
if (cmp >= 0) goto fz;
......@@ -1950,7 +1950,7 @@ toku_ft_bn_apply_cmd (
// the leaf then it is sequential
// window = min(32, number of leaf entries/16)
{
uint32_t s = bn->data_buffer.omt_size();
uint32_t s = bn->data_buffer.dmt_size();
uint32_t w = s / 16;
if (w == 0) w = 1;
if (w > 32) w = 32;
......@@ -1985,8 +1985,8 @@ toku_ft_bn_apply_cmd (
case FT_COMMIT_BROADCAST_ALL:
case FT_OPTIMIZE:
// Apply to all leafentries
omt_size = bn->data_buffer.omt_size();
for (uint32_t idx = 0; idx < omt_size; ) {
dmt_size = bn->data_buffer.dmt_size();
for (uint32_t idx = 0; idx < dmt_size; ) {
DBT curr_keydbt;
void* curr_keyp = NULL;
uint32_t curr_keylen = 0;
......@@ -2000,26 +2000,26 @@ toku_ft_bn_apply_cmd (
if (!le_is_clean(storeddata)) { //If already clean, nothing to do.
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, oldest_referenced_xid_known, gc_info, workdone, stats_to_update);
// at this point, we cannot trust cmd->u.id.key to be valid.
uint32_t new_omt_size = bn->data_buffer.omt_size();
if (new_omt_size != omt_size) {
paranoid_invariant(new_omt_size+1 == omt_size);
uint32_t new_dmt_size = bn->data_buffer.dmt_size();
if (new_dmt_size != dmt_size) {
paranoid_invariant(new_dmt_size+1 == dmt_size);
//Item was deleted.
deleted = 1;
}
}
if (deleted)
omt_size--;
dmt_size--;
else
idx++;
}
paranoid_invariant(bn->data_buffer.omt_size() == omt_size);
paranoid_invariant(bn->data_buffer.dmt_size() == dmt_size);
break;
case FT_COMMIT_BROADCAST_TXN:
case FT_ABORT_BROADCAST_TXN:
// Apply to all leafentries if txn is represented
omt_size = bn->data_buffer.omt_size();
for (uint32_t idx = 0; idx < omt_size; ) {
dmt_size = bn->data_buffer.dmt_size();
for (uint32_t idx = 0; idx < dmt_size; ) {
DBT curr_keydbt;
void* curr_keyp = NULL;
uint32_t curr_keylen = 0;
......@@ -2032,19 +2032,19 @@ toku_ft_bn_apply_cmd (
int deleted = 0;
if (le_has_xids(storeddata, cmd->xids)) {
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, oldest_referenced_xid_known, gc_info, workdone, stats_to_update);
uint32_t new_omt_size = bn->data_buffer.omt_size();
if (new_omt_size != omt_size) {
paranoid_invariant(new_omt_size+1 == omt_size);
uint32_t new_dmt_size = bn->data_buffer.dmt_size();
if (new_dmt_size != dmt_size) {
paranoid_invariant(new_dmt_size+1 == dmt_size);
//Item was deleted.
deleted = 1;
}
}
if (deleted)
omt_size--;
dmt_size--;
else
idx++;
}
paranoid_invariant(bn->data_buffer.omt_size() == omt_size);
paranoid_invariant(bn->data_buffer.dmt_size() == dmt_size);
break;
case FT_UPDATE: {
......@@ -2073,7 +2073,7 @@ toku_ft_bn_apply_cmd (
// apply to all leafentries.
uint32_t idx = 0;
uint32_t num_leafentries_before;
while (idx < (num_leafentries_before = bn->data_buffer.omt_size())) {
while (idx < (num_leafentries_before = bn->data_buffer.dmt_size())) {
void* curr_key = nullptr;
uint32_t curr_keylen = 0;
r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_key);
......@@ -2091,7 +2091,7 @@ toku_ft_bn_apply_cmd (
r = do_update(update_fun, desc, bn, cmd, idx, storeddata, curr_key, curr_keylen, oldest_referenced_xid_known, gc_info, workdone, stats_to_update);
assert_zero(r);
if (num_leafentries_before == bn->data_buffer.omt_size()) {
if (num_leafentries_before == bn->data_buffer.dmt_size()) {
// we didn't delete something, so increment the index.
idx++;
}
......@@ -2404,7 +2404,7 @@ basement_node_gc_all_les(BASEMENTNODE bn,
int r = 0;
uint32_t index = 0;
uint32_t num_leafentries_before;
while (index < (num_leafentries_before = bn->data_buffer.omt_size())) {
while (index < (num_leafentries_before = bn->data_buffer.dmt_size())) {
void* keyp = NULL;
uint32_t keylen = 0;
LEAFENTRY leaf_entry;
......@@ -2423,7 +2423,7 @@ basement_node_gc_all_les(BASEMENTNODE bn,
delta
);
// Check if the leaf entry was deleted or not.
if (num_leafentries_before == bn->data_buffer.omt_size()) {
if (num_leafentries_before == bn->data_buffer.dmt_size()) {
++index;
}
}
......@@ -4929,7 +4929,7 @@ ok: ;
switch (search->direction) {
case FT_SEARCH_LEFT:
idx++;
if (idx >= bn->data_buffer.omt_size()) {
if (idx >= bn->data_buffer.dmt_size()) {
if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) {
return TOKUDB_INTERRUPTED;
}
......@@ -5604,7 +5604,7 @@ ft_cursor_shortcut (
int r = 0;
// if we are searching towards the end, limit is last element
// if we are searching towards the beginning, limit is the first element
uint32_t limit = (direction > 0) ? (bd->omt_size() - 1) : 0;
uint32_t limit = (direction > 0) ? (bd->dmt_size() - 1) : 0;
//Starting with the prev, find the first real (non-provdel) leafentry.
while (index != limit) {
......@@ -5895,7 +5895,7 @@ keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node,
*less = idx_left;
*equal_left = (r==0) ? 1 : 0;
uint32_t size = bn->data_buffer.omt_size();
uint32_t size = bn->data_buffer.dmt_size();
uint32_t idx_right = size;
r = -1;
if (single_basement && key_right) {
......@@ -6155,7 +6155,7 @@ static int get_key_after_bytes_in_basementnode(FT ft, BASEMENTNODE bn, const DBT
assert(r == 0 || r == DB_NOTFOUND);
}
struct get_key_after_bytes_iterate_extra iter_extra = {skip_len, skipped, callback, cb_extra};
r = bn->data_buffer.omt_iterate_on_range<get_key_after_bytes_iterate_extra, get_key_after_bytes_iterate>(idx_left, bn->data_buffer.omt_size(), &iter_extra);
r = bn->data_buffer.dmt_iterate_on_range<get_key_after_bytes_iterate_extra, get_key_after_bytes_iterate>(idx_left, bn->data_buffer.dmt_size(), &iter_extra);
// Invert the sense of r == 0 (meaning the iterate finished, which means we didn't find what we wanted)
if (r == 1) {
......@@ -6351,7 +6351,7 @@ toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const
});
}
else {
int size = BLB_DATA(node, i)->omt_size();
int size = BLB_DATA(node, i)->dmt_size();
if (0)
for (int j=0; j<size; j++) {
LEAFENTRY le;
......@@ -6531,9 +6531,9 @@ static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
}
return 1;
} else {
// leaf: If the omt is empty, we are happy.
// leaf: If the dmt is empty, we are happy.
for (int i = 0; i < node->n_children; i++) {
if (BLB_DATA(node, i)->omt_size()) {
if (BLB_DATA(node, i)->dmt_size()) {
return false;
}
}
......
......@@ -152,7 +152,7 @@ verify_msg_in_child_buffer(FT_HANDLE brt, enum ft_msg_type type, MSN msn, byteve
static DBT
get_ith_key_dbt (BASEMENTNODE bn, int i) {
DBT kdbt;
int r = bn->data_buffer.fetch_le_key_and_len(i, &kdbt.size, &kdbt.data);
int r = bn->data_buffer.fetch_key_and_len(i, &kdbt.size, &kdbt.data);
invariant_zero(r); // this is a bad failure if it happens.
return kdbt;
}
......@@ -424,7 +424,7 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
}
else {
BASEMENTNODE bn = BLB(node, i);
for (uint32_t j = 0; j < bn->data_buffer.omt_size(); j++) {
for (uint32_t j = 0; j < bn->data_buffer.dmt_size(); j++) {
VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn");
DBT kdbt = get_ith_key_dbt(bn, j);
if (curr_less_pivot) {
......
......@@ -1077,8 +1077,8 @@ garbage_helper(BLOCKNUM blocknum, int64_t UU(size), int64_t UU(address), void *e
goto exit;
}
for (int i = 0; i < node->n_children; ++i) {
BN_DATA bd = BLB_DATA(node, i);
r = bd->omt_iterate<struct garbage_helper_extra, garbage_leafentry_helper>(info);
bn_data* bd = BLB_DATA(node, i);
r = bd->dmt_iterate<struct garbage_helper_extra, garbage_leafentry_helper>(info);
if (r != 0) {
goto exit;
}
......
......@@ -375,10 +375,10 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
}
else {
unsigned char ch = FTNODE_PARTITION_OMT_LEAVES;
BN_DATA bd = BLB_DATA(node, i);
bn_data* bd = BLB_DATA(node, i);
wbuf_nocrc_char(&wb, ch);
wbuf_nocrc_uint(&wb, bd->omt_size());
wbuf_nocrc_uint(&wb, bd->dmt_size());
bd->prepare_to_serialize();
bd->serialize_header(&wb);
......@@ -386,7 +386,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
//
// iterate over leafentries and place them into the buffer
//
bd->omt_iterate<struct wbuf, wbufwriteleafentry>(&wb);
bd->dmt_iterate<struct wbuf, wbufwriteleafentry>(&wb);
} else {
bd->serialize_rest(&wb);
}
......@@ -552,7 +552,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Count number of leaf entries in this leaf (num_le).
uint32_t num_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
num_le += BLB_DATA(node, i)->omt_size();
num_le += BLB_DATA(node, i)->dmt_size();
}
uint32_t num_alloc = num_le ? num_le : 1; // simplify logic below by always having at least one entry per array
......@@ -577,10 +577,10 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
uint32_t curr_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
BN_DATA bd = BLB_DATA(node, i);
bn_data* bd = BLB_DATA(node, i);
struct array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers };
bd->omt_iterate<array_info, array_item>(&ai);
curr_le += bd->omt_size();
bd->dmt_iterate<array_info, array_item>(&ai);
curr_le += bd->dmt_size();
}
// Create an array that will store indexes of new pivots.
......@@ -702,8 +702,8 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
uint32_t num_les_to_copy = num_les_this_bn[i];
invariant(num_les_to_copy == num_in_bn);
BN_DATA bd = BLB_DATA(node, i);
bd->replace_contents_with_clone_of_sorted_array(
bn_data* bd = BLB_DATA(node, i);
bd->set_contents_as_clone_of_sorted_array(
num_les_to_copy,
&key_pointers[baseindex_this_bn],
&key_sizes[baseindex_this_bn],
......@@ -1560,7 +1560,7 @@ deserialize_ftnode_partition(
data_size -= rb.ndone; // remaining bytes of leafentry data
BASEMENTNODE bn = BLB(node, childnum);
bn->data_buffer.initialize_from_data(num_entries, &rb, data_size, node->layout_version_read_from_disk);
bn->data_buffer.deserialize_from_rbuf(num_entries, &rb, data_size, node->layout_version_read_from_disk);
}
assert(rb.ndone == rb.size);
exit:
......@@ -2112,7 +2112,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node,
if (has_end_to_end_checksum) {
data_size -= sizeof(uint32_t);
}
bn->data_buffer.initialize_from_data(n_in_buf, rb, data_size, node->layout_version_read_from_disk);
bn->data_buffer.deserialize_from_rbuf(n_in_buf, rb, data_size, node->layout_version_read_from_disk);
}
// Whatever this is must be less than the MSNs of every message above
......
......@@ -2917,7 +2917,7 @@ static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int
// #3588 TODO just make a clean ule and append it to the omt
// #3588 TODO can do the rebalancing here and avoid a lot of work later
FTNODE leafnode = lbuf->node;
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
DBT thekey = { .data = key, .size = (uint32_t) keylen };
DBT theval = { .data = val, .size = (uint32_t) vallen };
FT_MSG_S cmd = { .type = FT_INSERT,
......
......@@ -234,7 +234,7 @@ typedef struct cachetable *CACHETABLE;
typedef struct cachefile *CACHEFILE;
typedef struct ctpair *PAIR;
typedef class checkpointer *CHECKPOINTER;
typedef class bn_data *BN_DATA;
class bn_data;
/* tree command types */
enum ft_msg_type {
......
......@@ -131,27 +131,26 @@ struct val_type {
};
namespace toku {
template<>
class dmt_functor<val_type> {
class vwriter {
public:
size_t get_dmtdatain_t_size(void) const {
size_t get_size(void) const {
size_t len = strlen(v.c);
invariant(len < sizeof(val_type));
return len + 1;
}
void write_dmtdata_t_to(val_type *const dest) const {
void write_to(val_type *const dest) const {
strcpy(dest->c, v.c);
}
dmt_functor(const char* c) {
vwriter(const char* c) {
invariant(strlen(c) < sizeof(val_type));
strcpy(v.c, c);
}
dmt_functor(const uint32_t klpair_len, val_type *const src) {
vwriter(const uint32_t klpair_len, val_type *const src) {
invariant(strlen(src->c) < sizeof(val_type));
strcpy(v.c, src->c);
invariant(klpair_len == get_dmtdatain_t_size());
invariant(klpair_len == get_size());
}
private:
val_type v;
......@@ -159,8 +158,7 @@ class dmt_functor<val_type> {
}
/* Globals */
typedef toku::dmt<val_type, val_type*> vdmt;
typedef toku::dmt_functor<val_type> vfunctor;
typedef toku::dmt<val_type, val_type*, toku::vwriter> vdmt;
const unsigned int random_seed = 0xFEADACBA;
......@@ -211,7 +209,7 @@ static void test_builder_fixed(uint32_t len, uint32_t num) {
builder.create(num, num * len);
for (uint32_t i = 0; i < num; i++) {
vfunctor vfun(data[i]);
vwriter vfun(data[i]);
builder.append(vfun);
}
invariant(builder.value_length_is_fixed());
......@@ -230,7 +228,7 @@ static void test_builder_fixed(uint32_t len, uint32_t num) {
v2.delete_at(change);
fail_one_verify(len, num, &v2);
vfunctor vfun(data[change]);
vwriter vfun(data[change]);
v2.insert_at(vfun, change);
verify(len, num, &v2);
v2.destroy();
......@@ -258,7 +256,7 @@ static void test_builder_variable(uint32_t len, uint32_t len2, uint32_t num) {
builder.create(num, (num-1) * len + len2);
for (uint32_t i = 0; i < num; i++) {
vfunctor vfun(data[i]);
vwriter vfun(data[i]);
builder.append(vfun);
}
invariant(!builder.value_length_is_fixed());
......
......@@ -357,7 +357,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
}
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) {
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le;
uint32_t curr_keylen;
void* curr_key;
......@@ -431,7 +431,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
if (i < nrows-1) {
uint32_t keylen;
void* curr_key;
BLB_DATA(&sn, i)->fetch_le_key_and_len(0, &keylen, &curr_key);
BLB_DATA(&sn, i)->fetch_key_and_len(0, &keylen, &curr_key);
toku_memdup_dbt(&sn.childkeys[i], curr_key, keylen);
}
}
......@@ -499,8 +499,8 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
}
assert(BLB_DATA(dn, bn)->omt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) {
assert(BLB_DATA(dn, bn)->dmt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le;
uint32_t curr_keylen;
void* curr_key;
......@@ -631,8 +631,8 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
}
assert(BLB_DATA(dn, bn)->omt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) {
assert(BLB_DATA(dn, bn)->dmt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le;
uint32_t curr_keylen;
void* curr_key;
......@@ -781,8 +781,8 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
}
assert(BLB_DATA(dn, bn)->omt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) {
assert(BLB_DATA(dn, bn)->dmt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le;
uint32_t curr_keylen;
void* curr_key;
......@@ -919,7 +919,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
}
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) {
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le;
uint32_t curr_keylen;
void* curr_key;
......@@ -1040,7 +1040,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
if (i > 0) {
assert(dest_ndd[i].start >= dest_ndd[i-1].start + dest_ndd[i-1].size);
}
assert(BLB_DATA(dn, i)->omt_size() == 0);
assert(BLB_DATA(dn, i)->dmt_size() == 0);
}
}
toku_ftnode_free(&dn);
......
......@@ -119,7 +119,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
MSN msn = next_dummymsn();
......
......@@ -733,7 +733,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
int total_messages = 0;
for (i = 0; i < 8; ++i) {
total_messages += BLB_DATA(child, i)->omt_size();
total_messages += BLB_DATA(child, i)->dmt_size();
}
assert(total_messages <= num_parent_messages + num_child_messages);
......@@ -746,7 +746,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
memset(parent_messages_present, 0, sizeof parent_messages_present);
memset(child_messages_present, 0, sizeof child_messages_present);
for (int j = 0; j < 8; ++j) {
uint32_t len = BLB_DATA(child, j)->omt_size();
uint32_t len = BLB_DATA(child, j)->dmt_size();
for (uint32_t idx = 0; idx < len; ++idx) {
LEAFENTRY le;
DBT keydbt, valdbt;
......@@ -968,7 +968,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
int total_messages = 0;
for (i = 0; i < 8; ++i) {
total_messages += BLB_DATA(child, i)->omt_size();
total_messages += BLB_DATA(child, i)->dmt_size();
}
assert(total_messages <= num_parent_messages + num_child_messages);
......@@ -1144,10 +1144,10 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
toku_ftnode_free(&parentnode);
for (int j = 0; j < 8; ++j) {
BN_DATA first = BLB_DATA(child1, j);
BN_DATA second = BLB_DATA(child2, j);
uint32_t len = first->omt_size();
assert(len == second->omt_size());
bn_data* first = BLB_DATA(child1, j);
bn_data* second = BLB_DATA(child2, j);
uint32_t len = first->dmt_size();
assert(len == second->dmt_size());
for (uint32_t idx = 0; idx < len; ++idx) {
LEAFENTRY le1, le2;
DBT key1dbt, val1dbt, key2dbt, val2dbt;
......
......@@ -348,7 +348,7 @@ doit (int state) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1);
assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
toku_pin_ftnode_off_client_thread(
......@@ -364,7 +364,7 @@ doit (int state) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1);
assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
}
else if (state == ft_flush_aflter_merge || state == flt_flush_before_unpin_remove) {
......@@ -381,7 +381,7 @@ doit (int state) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2);
assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
}
else {
......
......@@ -359,7 +359,7 @@ doit (int state) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2);
assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
toku_pin_ftnode_off_client_thread(
......@@ -375,7 +375,7 @@ doit (int state) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2);
assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
......
......@@ -342,7 +342,7 @@ doit (bool after_split) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1);
assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
toku_pin_ftnode_off_client_thread(
......@@ -358,7 +358,7 @@ doit (bool after_split) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1);
assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
}
else {
......@@ -375,7 +375,7 @@ doit (bool after_split) {
assert(node->height == 0);
assert(!node->dirty);
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2);
assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
}
......
......@@ -122,7 +122,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
MSN msn = next_dummymsn();
......
......@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node
MSN msn = next_dummymsn();
......
......@@ -112,7 +112,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node
MSN msn = next_dummymsn();
......
......@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node
MSN msn = next_dummymsn();
......
......@@ -112,7 +112,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node
MSN msn = next_dummymsn();
......
......@@ -114,7 +114,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node
MSN msn = next_dummymsn();
......
......@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size();
uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node
MSN msn = next_dummymsn();
......
......@@ -315,9 +315,9 @@ dump_node (int f, BLOCKNUM blocknum, FT h) {
}
} else {
printf(" n_bytes_in_buffer= %" PRIu64 "", BLB_DATA(n, i)->get_disk_size());
printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->omt_size());
printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->dmt_size());
if (dump_data) {
BLB_DATA(n, i)->omt_iterate<void, print_le>(NULL);
BLB_DATA(n, i)->dmt_iterate<void, print_le>(NULL);
}
}
}
......
This diff is collapsed.
......@@ -197,43 +197,40 @@ class dmt_node_templated {
using namespace toku::dmt_internal;
// Each data type used in a dmt requires a dmt_functor (allows you to insert/etc with dynamic sized types).
// Each data type used in a dmt requires a dmt_writer class (allows you to insert/etc with dynamic sized types).
// There is no default implementation.
template<typename dmtdata_t>
class dmt_functor {
// Ensures that if you forget to use partial specialization this compile error will remind you to use it.
// We would use static_assert(false, ...) here except that it would cause a compile error even if dmt_functor<> is never instantiated.
// We instead use an expression that evaluates to false that the compiler won't evaluate unless dmt_functor<> is used.
static_assert(!std::is_same<dmtdata_t, dmtdata_t>::value, "Cannot use default dmt_functor<>. Use partial specialization.");
// Defines the interface:
static size_t get_dmtdata_t_size(const dmtdata_t &) { return 0; }
size_t get_dmtdatain_t_size(void) { return 0; }
void write_dmtdata_t_to(dmtdata_t *const dest) {}
};
// A dmtwriter instance handles reading/writing 'dmtdata_t's to/from the dmt.
// The class must implement the following functions:
// The size required in a dmt for the dmtdata_t represented:
// size_t get_size(void) const;
// Write the dmtdata_t to memory owned by a dmt:
// void write_to(dmtdata_t *const dest) const;
// Constructor (others are allowed, but this one is required)
// dmtwriter(const uint32_t dmtdata_t_len, dmtdata_t *const src)
template<typename dmtdata_t,
typename dmtdataout_t=dmtdata_t
typename dmtdataout_t,
typename dmtwriter_t
>
class dmt {
private:
typedef dmt_node_templated<dmtdata_t> dmt_node;
typedef dmt_functor<dmtdata_t> dmtdatain_t;
public:
static const uint8_t ALIGNMENT = 4;
class builder {
public:
void append(const dmtdatain_t &value);
void append(const dmtwriter_t &value);
void create(uint32_t n_values, uint32_t n_value_bytes);
bool value_length_is_fixed(void);
void build(dmt<dmtdata_t, dmtdataout_t> *dest);
void build(dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> *dest);
private:
uint32_t max_values;
uint32_t max_value_bytes;
node_offset *sorted_node_offsets;
bool temp_valid;
dmt<dmtdata_t, dmtdataout_t> temp;
dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> temp;
};
/**
......@@ -306,7 +303,7 @@ class dmt {
* Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
*/
template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)>
int insert(const dmtdatain_t &value, const dmtcmp_t &v, uint32_t *const idx);
int insert(const dmtwriter_t &value, const dmtcmp_t &v, uint32_t *const idx);
/**
* Effect: Increases indexes of all items at slot >= idx by 1.
......@@ -318,7 +315,7 @@ class dmt {
* Performance: time=O(\log N) amortized time.
* Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
*/
int insert_at(const dmtdatain_t &value, const uint32_t idx);
int insert_at(const dmtwriter_t &value, const uint32_t idx);
/**
* Effect: Delete the item in slot idx.
......@@ -557,9 +554,9 @@ class dmt {
uint32_t nweight(const subtree &subtree) const;
node_offset node_malloc_and_set_value(const dmtdatain_t &value);
node_offset node_malloc_and_set_value(const dmtwriter_t &value);
void node_set_value(dmt_node *n, const dmtdatain_t &value);
void node_set_value(dmt_node *n, const dmtwriter_t &value);
void node_free(const subtree &st);
......@@ -567,15 +564,15 @@ class dmt {
void convert_to_tree(void);
void maybe_resize_tree(const dmtdatain_t * value);
void maybe_resize_tree(const dmtwriter_t * value);
bool will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const;
__attribute__((nonnull))
void insert_internal(subtree *const subtreep, const dmtdatain_t &value, const uint32_t idx, subtree **const rebalance_subtree);
void insert_internal(subtree *const subtreep, const dmtwriter_t &value, const uint32_t idx, subtree **const rebalance_subtree);
template<bool with_resize>
int insert_at_array_end(const dmtdatain_t& value_in);
int insert_at_array_end(const dmtwriter_t& value_in);
dmtdata_t * alloc_array_value_end(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment