Commit 6b0c108c authored by Yoni Fogel's avatar Yoni Fogel

Refs Tokutek/ft-index#46 Killed dmt_functor<> template. Added dmtwriter_t...

Refs Tokutek/ft-index#46 Killed dmt_functor<> template.  Added dmtwriter_t template parameter to dmt and some documentation
parent eef0ab78
...@@ -157,7 +157,7 @@ void bn_data::initialize_from_separate_keys_and_vals(uint32_t num_entries, struc ...@@ -157,7 +157,7 @@ void bn_data::initialize_from_separate_keys_and_vals(uint32_t num_entries, struc
void bn_data::prepare_to_serialize(void) { void bn_data::prepare_to_serialize(void) {
if (m_buffer.value_length_is_fixed()) { if (m_buffer.value_length_is_fixed()) {
m_buffer.prepare_for_serialize(); m_buffer.prepare_for_serialize();
omt_compress_kvspace(0, nullptr, true); // Gets it ready for easy serialization. dmt_compress_kvspace(0, nullptr, true); // Gets it ready for easy serialization.
} }
} }
...@@ -182,7 +182,7 @@ void bn_data::serialize_rest(struct wbuf *wb) const { ...@@ -182,7 +182,7 @@ void bn_data::serialize_rest(struct wbuf *wb) const {
m_buffer.serialize_values(m_disksize_of_keys, wb); m_buffer.serialize_values(m_disksize_of_keys, wb);
//Write leafentries //Write leafentries
//Just ran omt_compress_kvspace so there is no fragmentation and also leafentries are in sorted order. //Just ran dmt_compress_kvspace so there is no fragmentation and also leafentries are in sorted order.
paranoid_invariant(toku_mempool_get_frag_size(&m_buffer_mempool) == 0); paranoid_invariant(toku_mempool_get_frag_size(&m_buffer_mempool) == 0);
uint32_t val_data_size = toku_mempool_get_used_space(&m_buffer_mempool); uint32_t val_data_size = toku_mempool_get_used_space(&m_buffer_mempool);
wbuf_nocrc_literal_bytes(wb, toku_mempool_get_base(&m_buffer_mempool), val_data_size); wbuf_nocrc_literal_bytes(wb, toku_mempool_get_base(&m_buffer_mempool), val_data_size);
...@@ -194,7 +194,7 @@ bool bn_data::need_to_serialize_each_leafentry_with_key(void) const { ...@@ -194,7 +194,7 @@ bool bn_data::need_to_serialize_each_leafentry_with_key(void) const {
} }
// Deserialize from rbuf // Deserialize from rbuf
void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version) { void bn_data::deserialize_from_rbuf(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version) {
uint32_t key_data_size = data_size; // overallocate if < version 25 (best guess that is guaranteed not too small) uint32_t key_data_size = data_size; // overallocate if < version 25 (best guess that is guaranteed not too small)
uint32_t val_data_size = data_size; // overallocate if < version 25 (best guess that is guaranteed not too small) uint32_t val_data_size = data_size; // overallocate if < version 25 (best guess that is guaranteed not too small)
...@@ -232,7 +232,7 @@ void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32 ...@@ -232,7 +232,7 @@ void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32
klpair_dmt_t::builder dmt_builder; klpair_dmt_t::builder dmt_builder;
dmt_builder.create(num_entries, key_data_size); dmt_builder.create(num_entries, key_data_size);
unsigned char *newmem = NULL; unsigned char *newmem = nullptr;
// add same wiggle room that toku_mempool_construct would, 25% extra // add same wiggle room that toku_mempool_construct would, 25% extra
uint32_t allocated_bytes_vals = val_data_size + val_data_size/4; uint32_t allocated_bytes_vals = val_data_size + val_data_size/4;
CAST_FROM_VOIDP(newmem, toku_xmalloc(allocated_bytes_vals)); CAST_FROM_VOIDP(newmem, toku_xmalloc(allocated_bytes_vals));
...@@ -245,7 +245,7 @@ void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32 ...@@ -245,7 +245,7 @@ void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32
// to do so, we must extract it from the leafentry // to do so, we must extract it from the leafentry
// and write it in // and write it in
uint32_t keylen = 0; uint32_t keylen = 0;
const void* keyp = NULL; const void* keyp = nullptr;
keylen = *(uint32_t *)curr_src_pos; keylen = *(uint32_t *)curr_src_pos;
curr_src_pos += sizeof(uint32_t); curr_src_pos += sizeof(uint32_t);
uint32_t clean_vallen = 0; uint32_t clean_vallen = 0;
...@@ -267,7 +267,7 @@ void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32 ...@@ -267,7 +267,7 @@ void bn_data::initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32
curr_src_pos += keylen; curr_src_pos += keylen;
} }
uint32_t le_offset = curr_dest_pos - newmem; uint32_t le_offset = curr_dest_pos - newmem;
dmt_builder.append(toku::dmt_functor<klpair_struct>(keylen, le_offset, keyp)); dmt_builder.append(klpair_dmtwriter(keylen, le_offset, keyp));
add_key(keylen); add_key(keylen);
// now curr_dest_pos is pointing to where the leafentry should be packed // now curr_dest_pos is pointing to where the leafentry should be packed
...@@ -355,12 +355,12 @@ void bn_data::delete_leafentry ( ...@@ -355,12 +355,12 @@ void bn_data::delete_leafentry (
/* mempool support */ /* mempool support */
struct omt_compressor_state { struct dmt_compressor_state {
struct mempool *new_kvspace; struct mempool *new_kvspace;
class bn_data *bd; class bn_data *bd;
}; };
static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU(), struct omt_compressor_state * const oc) { static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU(), struct dmt_compressor_state * const oc) {
LEAFENTRY old_le = oc->bd->get_le_from_klpair(klpair); LEAFENTRY old_le = oc->bd->get_le_from_klpair(klpair);
uint32_t size = leafentry_memsize(old_le); uint32_t size = leafentry_memsize(old_le);
void* newdata = toku_mempool_malloc(oc->new_kvspace, size, 1); void* newdata = toku_mempool_malloc(oc->new_kvspace, size, 1);
...@@ -372,7 +372,7 @@ static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU ...@@ -372,7 +372,7 @@ static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU
// Compress things, and grow the mempool if needed. // Compress things, and grow the mempool if needed.
// May (always if force_compress) have a side effect of putting contents of mempool in sorted order. // May (always if force_compress) have a side effect of putting contents of mempool in sorted order.
void bn_data::omt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress) { void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress) {
uint32_t total_size_needed = toku_mempool_get_used_space(&m_buffer_mempool) + added_size; uint32_t total_size_needed = toku_mempool_get_used_space(&m_buffer_mempool) + added_size;
// set the new mempool size to be twice of the space we actually need. // set the new mempool size to be twice of the space we actually need.
// On top of the 25% that is padded within toku_mempool_construct (which we // On top of the 25% that is padded within toku_mempool_construct (which we
...@@ -390,7 +390,7 @@ void bn_data::omt_compress_kvspace(size_t added_size, void **maybe_free, bool fo ...@@ -390,7 +390,7 @@ void bn_data::omt_compress_kvspace(size_t added_size, void **maybe_free, bool fo
} }
struct mempool new_kvspace; struct mempool new_kvspace;
toku_mempool_construct(&new_kvspace, 2*total_size_needed); toku_mempool_construct(&new_kvspace, 2*total_size_needed);
struct omt_compressor_state oc = { &new_kvspace, this}; struct dmt_compressor_state oc = { &new_kvspace, this};
m_buffer.iterate_ptr< decltype(oc), move_it >(&oc); m_buffer.iterate_ptr< decltype(oc), move_it >(&oc);
if (maybe_free) { if (maybe_free) {
...@@ -403,12 +403,12 @@ void bn_data::omt_compress_kvspace(size_t added_size, void **maybe_free, bool fo ...@@ -403,12 +403,12 @@ void bn_data::omt_compress_kvspace(size_t added_size, void **maybe_free, bool fo
// Effect: Allocate a new object of size SIZE in MP. If MP runs out of space, allocate new a new mempool space, and copy all the items // Effect: Allocate a new object of size SIZE in MP. If MP runs out of space, allocate new a new mempool space, and copy all the items
// from the OMT (which items refer to items in the old mempool) into the new mempool. // from the OMT (which items refer to items in the old mempool) into the new mempool.
// If MAYBE_FREE is NULL then free the old mempool's space. // If MAYBE_FREE is nullptr then free the old mempool's space.
// Otherwise, store the old mempool's space in maybe_free. // Otherwise, store the old mempool's space in maybe_free.
LEAFENTRY bn_data::mempool_malloc_and_update_omt(size_t size, void **maybe_free) { LEAFENTRY bn_data::mempool_malloc_and_update_dmt(size_t size, void **maybe_free) {
void *v = toku_mempool_malloc(&m_buffer_mempool, size, 1); void *v = toku_mempool_malloc(&m_buffer_mempool, size, 1);
if (v == NULL) { if (v == nullptr) {
omt_compress_kvspace(size, maybe_free, false); dmt_compress_kvspace(size, maybe_free, false);
v = toku_mempool_malloc(&m_buffer_mempool, size, 1); v = toku_mempool_malloc(&m_buffer_mempool, size, 1);
paranoid_invariant_notnull(v); paranoid_invariant_notnull(v);
} }
...@@ -425,12 +425,12 @@ void bn_data::get_space_for_overwrite( ...@@ -425,12 +425,12 @@ void bn_data::get_space_for_overwrite(
) )
{ {
void* maybe_free = nullptr; void* maybe_free = nullptr;
LEAFENTRY new_le = mempool_malloc_and_update_omt( LEAFENTRY new_le = mempool_malloc_and_update_dmt(
new_size, new_size,
&maybe_free &maybe_free
); );
toku_mempool_mfree(&m_buffer_mempool, nullptr, old_le_size); // Must pass nullptr, since le is no good any more. toku_mempool_mfree(&m_buffer_mempool, nullptr, old_le_size); // Must pass nullptr, since le is no good any more.
KLPAIR klp = nullptr; klpair_struct* klp = nullptr;
uint32_t klpair_len; uint32_t klpair_len;
int r = m_buffer.fetch(idx, &klpair_len, &klp); int r = m_buffer.fetch(idx, &klpair_len, &klp);
invariant_zero(r); invariant_zero(r);
...@@ -463,13 +463,13 @@ void bn_data::get_space_for_insert( ...@@ -463,13 +463,13 @@ void bn_data::get_space_for_insert(
add_key(keylen); add_key(keylen);
void* maybe_free = nullptr; void* maybe_free = nullptr;
LEAFENTRY new_le = mempool_malloc_and_update_omt( LEAFENTRY new_le = mempool_malloc_and_update_dmt(
size, size,
&maybe_free &maybe_free
); );
size_t new_le_offset = toku_mempool_get_offset_from_pointer_and_base(&this->m_buffer_mempool, new_le); size_t new_le_offset = toku_mempool_get_offset_from_pointer_and_base(&this->m_buffer_mempool, new_le);
toku::dmt_functor<klpair_struct> kl(keylen, new_le_offset, keyp); klpair_dmtwriter kl(keylen, new_le_offset, keyp);
m_buffer.insert_at(kl, idx); m_buffer.insert_at(kl, idx);
*new_le_space = new_le; *new_le_space = new_le;
...@@ -483,15 +483,15 @@ void bn_data::get_space_for_insert( ...@@ -483,15 +483,15 @@ void bn_data::get_space_for_insert(
} }
void bn_data::move_leafentries_to( void bn_data::move_leafentries_to(
BN_DATA dest_bd, bn_data* dest_bd,
uint32_t lbi, //lower bound inclusive uint32_t lbi, //lower bound inclusive
uint32_t ube //upper bound exclusive uint32_t ube //upper bound exclusive
) )
//Effect: move leafentries in the range [lbi, ube) from this to src_omt to newly created dest_omt //Effect: move leafentries in the range [lbi, ube) from this to src_dmt to newly created dest_dmt
{ {
//TODO: improve speed: maybe use dmt_builder for one or both, or implement some version of optimized split_at? //TODO: improve speed: maybe use dmt_builder for one or both, or implement some version of optimized split_at?
paranoid_invariant(lbi < ube); paranoid_invariant(lbi < ube);
paranoid_invariant(ube <= omt_size()); paranoid_invariant(ube <= dmt_size());
dest_bd->initialize_empty(); dest_bd->initialize_empty();
...@@ -501,7 +501,7 @@ void bn_data::move_leafentries_to( ...@@ -501,7 +501,7 @@ void bn_data::move_leafentries_to(
toku_mempool_construct(dest_mp, mpsize); toku_mempool_construct(dest_mp, mpsize);
for (uint32_t i = lbi; i < ube; i++) { for (uint32_t i = lbi; i < ube; i++) {
KLPAIR curr_kl = nullptr; klpair_struct* curr_kl = nullptr;
uint32_t curr_kl_len; uint32_t curr_kl_len;
int r = m_buffer.fetch(i, &curr_kl_len, &curr_kl); int r = m_buffer.fetch(i, &curr_kl_len, &curr_kl);
invariant_zero(r); invariant_zero(r);
...@@ -511,7 +511,7 @@ void bn_data::move_leafentries_to( ...@@ -511,7 +511,7 @@ void bn_data::move_leafentries_to(
void* new_le = toku_mempool_malloc(dest_mp, le_size, 1); void* new_le = toku_mempool_malloc(dest_mp, le_size, 1);
memcpy(new_le, old_le, le_size); memcpy(new_le, old_le, le_size);
size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(dest_mp, new_le); size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(dest_mp, new_le);
dest_bd->m_buffer.insert_at(dmt_functor<klpair_struct>(keylen_from_klpair_len(curr_kl_len), le_offset, curr_kl->key), i-lbi); dest_bd->m_buffer.insert_at(klpair_dmtwriter(keylen_from_klpair_len(curr_kl_len), le_offset, curr_kl->key), i-lbi);
this->remove_key(keylen_from_klpair_len(curr_kl_len)); this->remove_key(keylen_from_klpair_len(curr_kl_len));
dest_bd->add_key(keylen_from_klpair_len(curr_kl_len)); dest_bd->add_key(keylen_from_klpair_len(curr_kl_len));
...@@ -519,7 +519,7 @@ void bn_data::move_leafentries_to( ...@@ -519,7 +519,7 @@ void bn_data::move_leafentries_to(
toku_mempool_mfree(src_mp, old_le, le_size); toku_mempool_mfree(src_mp, old_le, le_size);
} }
// now remove the elements from src_omt // now remove the elements from src_dmt
for (uint32_t i=ube-1; i >= lbi; i--) { for (uint32_t i=ube-1; i >= lbi; i--) {
m_buffer.delete_at(i); m_buffer.delete_at(i);
} }
...@@ -548,9 +548,9 @@ static int verify_le_in_mempool (const uint32_t, klpair_struct *klpair, const ui ...@@ -548,9 +548,9 @@ static int verify_le_in_mempool (const uint32_t, klpair_struct *klpair, const ui
} }
//This is a debug-only (paranoid) verification. //This is a debug-only (paranoid) verification.
//Verifies the omt is valid, and all leafentries are entirely in the mempool's memory. //Verifies the dmt is valid, and all leafentries are entirely in the mempool's memory.
void bn_data::verify_mempool(void) { void bn_data::verify_mempool(void) {
//Verify the omt itself <- paranoid and slow //Verify the dmt itself <- paranoid and slow
m_buffer.verify(); m_buffer.verify();
verify_le_in_mempool_state state = { .offset_limit = toku_mempool_get_offset_limit(&m_buffer_mempool), .bd = this }; verify_le_in_mempool_state state = { .offset_limit = toku_mempool_get_offset_limit(&m_buffer_mempool), .bd = this };
...@@ -558,7 +558,7 @@ void bn_data::verify_mempool(void) { ...@@ -558,7 +558,7 @@ void bn_data::verify_mempool(void) {
m_buffer.iterate_ptr< decltype(state), verify_le_in_mempool >(&state); m_buffer.iterate_ptr< decltype(state), verify_le_in_mempool >(&state);
} }
uint32_t bn_data::omt_size(void) const { uint32_t bn_data::dmt_size(void) const {
return m_buffer.size(); return m_buffer.size();
} }
...@@ -569,7 +569,7 @@ void bn_data::destroy(void) { ...@@ -569,7 +569,7 @@ void bn_data::destroy(void) {
m_disksize_of_keys = 0; m_disksize_of_keys = 0;
} }
void bn_data::replace_contents_with_clone_of_sorted_array( void bn_data::set_contents_as_clone_of_sorted_array(
uint32_t num_les, uint32_t num_les,
const void** old_key_ptrs, const void** old_key_ptrs,
uint32_t* old_keylens, uint32_t* old_keylens,
...@@ -579,6 +579,12 @@ void bn_data::replace_contents_with_clone_of_sorted_array( ...@@ -579,6 +579,12 @@ void bn_data::replace_contents_with_clone_of_sorted_array(
size_t total_le_size size_t total_le_size
) )
{ {
//Enforce "just created" invariant.
paranoid_invariant_zero(m_disksize_of_keys);
paranoid_invariant_zero(dmt_size());
paranoid_invariant_null(toku_mempool_get_base(&m_buffer_mempool));
paranoid_invariant_zero(toku_mempool_get_size(&m_buffer_mempool));
toku_mempool_construct(&m_buffer_mempool, total_le_size); toku_mempool_construct(&m_buffer_mempool, total_le_size);
m_buffer.destroy(); m_buffer.destroy();
m_disksize_of_keys = 0; m_disksize_of_keys = 0;
...@@ -591,7 +597,7 @@ void bn_data::replace_contents_with_clone_of_sorted_array( ...@@ -591,7 +597,7 @@ void bn_data::replace_contents_with_clone_of_sorted_array(
void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx], 1); void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx], 1);
memcpy(new_le, old_les[idx], le_sizes[idx]); memcpy(new_le, old_les[idx], le_sizes[idx]);
size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(&m_buffer_mempool, new_le); size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(&m_buffer_mempool, new_le);
dmt_builder.append(dmt_functor<klpair_struct>(old_keylens[idx], le_offset, old_key_ptrs[idx])); dmt_builder.append(klpair_dmtwriter(old_keylens[idx], le_offset, old_key_ptrs[idx]));
add_key(old_keylens[idx]); add_key(old_keylens[idx]);
} }
dmt_builder.build(&this->m_buffer); dmt_builder.build(&this->m_buffer);
...@@ -606,7 +612,7 @@ LEAFENTRY bn_data::get_le_from_klpair(const klpair_struct *klpair) const { ...@@ -606,7 +612,7 @@ LEAFENTRY bn_data::get_le_from_klpair(const klpair_struct *klpair) const {
// get info about a single leafentry by index // get info about a single leafentry by index
int bn_data::fetch_le(uint32_t idx, LEAFENTRY *le) { int bn_data::fetch_le(uint32_t idx, LEAFENTRY *le) {
KLPAIR klpair = NULL; klpair_struct* klpair = nullptr;
int r = m_buffer.fetch(idx, nullptr, &klpair); int r = m_buffer.fetch(idx, nullptr, &klpair);
if (r == 0) { if (r == 0) {
*le = get_le_from_klpair(klpair); *le = get_le_from_klpair(klpair);
...@@ -615,7 +621,7 @@ int bn_data::fetch_le(uint32_t idx, LEAFENTRY *le) { ...@@ -615,7 +621,7 @@ int bn_data::fetch_le(uint32_t idx, LEAFENTRY *le) {
} }
int bn_data::fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key) { int bn_data::fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key) {
KLPAIR klpair = NULL; klpair_struct* klpair = nullptr;
uint32_t klpair_len; uint32_t klpair_len;
int r = m_buffer.fetch(idx, &klpair_len, &klpair); int r = m_buffer.fetch(idx, &klpair_len, &klpair);
if (r == 0) { if (r == 0) {
...@@ -627,7 +633,7 @@ int bn_data::fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key ...@@ -627,7 +633,7 @@ int bn_data::fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key
} }
int bn_data::fetch_klpair_disksize(uint32_t idx, size_t *size) { int bn_data::fetch_klpair_disksize(uint32_t idx, size_t *size) {
KLPAIR klpair = NULL; klpair_struct* klpair = nullptr;
uint32_t klpair_len; uint32_t klpair_len;
int r = m_buffer.fetch(idx, &klpair_len, &klpair); int r = m_buffer.fetch(idx, &klpair_len, &klpair);
if (r == 0) { if (r == 0) {
...@@ -636,8 +642,8 @@ int bn_data::fetch_klpair_disksize(uint32_t idx, size_t *size) { ...@@ -636,8 +642,8 @@ int bn_data::fetch_klpair_disksize(uint32_t idx, size_t *size) {
return r; return r;
} }
int bn_data::fetch_le_key_and_len(uint32_t idx, uint32_t *len, void** key) { int bn_data::fetch_key_and_len(uint32_t idx, uint32_t *len, void** key) {
KLPAIR klpair = NULL; klpair_struct* klpair = nullptr;
uint32_t klpair_len; uint32_t klpair_len;
int r = m_buffer.fetch(idx, &klpair_len, &klpair); int r = m_buffer.fetch(idx, &klpair_len, &klpair);
if (r == 0) { if (r == 0) {
......
...@@ -106,7 +106,6 @@ static constexpr uint32_t keylen_from_klpair_len(const uint32_t klpair_len) { ...@@ -106,7 +106,6 @@ static constexpr uint32_t keylen_from_klpair_len(const uint32_t klpair_len) {
return klpair_len - __builtin_offsetof(klpair_struct, key); return klpair_len - __builtin_offsetof(klpair_struct, key);
} }
typedef struct klpair_struct KLPAIR_S, *KLPAIR;
static_assert(__builtin_offsetof(klpair_struct, key) == 1*sizeof(uint32_t), "klpair alignment issues"); static_assert(__builtin_offsetof(klpair_struct, key) == 1*sizeof(uint32_t), "klpair alignment issues");
static_assert(__builtin_offsetof(klpair_struct, key) == sizeof(klpair_struct), "klpair size issues"); static_assert(__builtin_offsetof(klpair_struct, key) == sizeof(klpair_struct), "klpair size issues");
...@@ -116,7 +115,7 @@ static_assert(__builtin_offsetof(klpair_struct, key) == sizeof(klpair_struct), " ...@@ -116,7 +115,7 @@ static_assert(__builtin_offsetof(klpair_struct, key) == sizeof(klpair_struct), "
// Alternative to this wrapper is to expose accessor functions and rewrite all the external heaviside functions. // Alternative to this wrapper is to expose accessor functions and rewrite all the external heaviside functions.
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const DBT &, const dmtcmp_t &)> int (*h)(const DBT &, const dmtcmp_t &)>
static int wrappy_fun_find(const uint32_t klpair_len, const klpair_struct &klpair, const dmtcmp_t &extra) { static int klpair_find_wrapper(const uint32_t klpair_len, const klpair_struct &klpair, const dmtcmp_t &extra) {
DBT kdbt; DBT kdbt;
kdbt.data = const_cast<void*>(reinterpret_cast<const void*>(klpair.key)); kdbt.data = const_cast<void*>(reinterpret_cast<const void*>(klpair.key));
kdbt.size = keylen_from_klpair_len(klpair_len); kdbt.size = keylen_from_klpair_len(klpair_len);
...@@ -124,7 +123,7 @@ static int wrappy_fun_find(const uint32_t klpair_len, const klpair_struct &klpai ...@@ -124,7 +123,7 @@ static int wrappy_fun_find(const uint32_t klpair_len, const klpair_struct &klpai
} }
template<typename inner_iterate_extra_t> template<typename inner_iterate_extra_t>
struct wrapped_iterate_extra_t { struct klpair_iterate_extra {
public: public:
inner_iterate_extra_t *inner; inner_iterate_extra_t *inner;
const class bn_data * bd; const class bn_data * bd;
...@@ -135,7 +134,7 @@ struct wrapped_iterate_extra_t { ...@@ -135,7 +134,7 @@ struct wrapped_iterate_extra_t {
// Alternative to this wrapper is to expose accessor functions and rewrite all the external heaviside functions. // Alternative to this wrapper is to expose accessor functions and rewrite all the external heaviside functions.
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t idx, iterate_extra_t *const)> int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t idx, iterate_extra_t *const)>
static int wrappy_fun_iterate(const uint32_t klpair_len, const klpair_struct &klpair, const uint32_t idx, wrapped_iterate_extra_t<iterate_extra_t> *const extra) { static int klpair_iterate_wrapper(const uint32_t klpair_len, const klpair_struct &klpair, const uint32_t idx, klpair_iterate_extra<iterate_extra_t> *const extra) {
const void* key = &klpair.key; const void* key = &klpair.key;
LEAFENTRY le = extra->bd->get_le_from_klpair(&klpair); LEAFENTRY le = extra->bd->get_le_from_klpair(&klpair);
return f(key, keylen_from_klpair_len(klpair_len), le, idx, extra->inner); return f(key, keylen_from_klpair_len(klpair_len), le, idx, extra->inner);
...@@ -143,21 +142,22 @@ static int wrappy_fun_iterate(const uint32_t klpair_len, const klpair_struct &kl ...@@ -143,21 +142,22 @@ static int wrappy_fun_iterate(const uint32_t klpair_len, const klpair_struct &kl
namespace toku { namespace toku {
template<> // dmt writer for klpair_struct
// Use of dmt requires a dmt_functor for the specific type. class klpair_dmtwriter {
class dmt_functor<klpair_struct> {
public: public:
size_t get_dmtdatain_t_size(void) const { // Return the size needed for the klpair_struct that this dmtwriter represents
size_t get_size(void) const {
return sizeof(klpair_struct) + this->keylen; return sizeof(klpair_struct) + this->keylen;
} }
void write_dmtdata_t_to(klpair_struct *const dest) const { // Write the klpair_struct this dmtwriter represents to a destination
void write_to(klpair_struct *const dest) const {
dest->le_offset = this->le_offset; dest->le_offset = this->le_offset;
memcpy(dest->key, this->keyp, this->keylen); memcpy(dest->key, this->keyp, this->keylen);
} }
dmt_functor(uint32_t _keylen, uint32_t _le_offset, const void* _keyp) klpair_dmtwriter(uint32_t _keylen, uint32_t _le_offset, const void* _keyp)
: keylen(_keylen), le_offset(_le_offset), keyp(_keyp) {} : keylen(_keylen), le_offset(_le_offset), keyp(_keyp) {}
dmt_functor(const uint32_t klpair_len, klpair_struct *const src) klpair_dmtwriter(const uint32_t klpair_len, klpair_struct *const src)
: keylen(keylen_from_klpair_len(klpair_len)), le_offset(src->le_offset), keyp(src->key) {} : keylen(keylen_from_klpair_len(klpair_len)), le_offset(src->le_offset), keyp(src->key) {}
private: private:
const uint32_t keylen; const uint32_t keylen;
...@@ -166,48 +166,63 @@ class dmt_functor<klpair_struct> { ...@@ -166,48 +166,63 @@ class dmt_functor<klpair_struct> {
}; };
} }
typedef toku::dmt<KLPAIR_S, KLPAIR> klpair_dmt_t; typedef toku::dmt<klpair_struct, klpair_struct*, toku::klpair_dmtwriter> klpair_dmt_t;
// This class stores the data associated with a basement node // This class stores the data associated with a basement node
class bn_data { class bn_data {
public: public:
// Initialize an empty bn_data _without_ a dmt backing.
// Externally only used for deserialization.
void init_zero(void); void init_zero(void);
// Initialize an empty bn_data _with_ a dmt
void initialize_empty(void); void initialize_empty(void);
// Deserialize from rbuf. // Deserialize a bn_data from rbuf.
void initialize_from_data(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version); // This is the entry point for deserialization.
// globals void deserialize_from_rbuf(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version);
// Retrieve the memory footprint of this basement node.
// May over or under count: see Tokutek/ft-index#136
// Also see dmt's implementation.
uint64_t get_memory_size(void); uint64_t get_memory_size(void);
// Get the serialized size of this basement node.
uint64_t get_disk_size(void); uint64_t get_disk_size(void);
// Perform (paranoid) verification that all leafentries are fully contained within the mempool
void verify_mempool(void); void verify_mempool(void);
// Interact with "dmt" // size() of key dmt
uint32_t omt_size(void) const; uint32_t dmt_size(void) const;
// iterate() on key dmt (and associated leafentries)
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)> int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
int omt_iterate(iterate_extra_t *const iterate_extra) const { int dmt_iterate(iterate_extra_t *const iterate_extra) const {
return omt_iterate_on_range<iterate_extra_t, f>(0, omt_size(), iterate_extra); return dmt_iterate_on_range<iterate_extra_t, f>(0, dmt_size(), iterate_extra);
} }
// iterate_on_range() on key dmt (and associated leafentries)
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)> int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
int omt_iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const { int dmt_iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
wrapped_iterate_extra_t<iterate_extra_t> wrapped_extra = { iterate_extra, this }; klpair_iterate_extra<iterate_extra_t> klpair_extra = { iterate_extra, this };
return m_buffer.iterate_on_range< wrapped_iterate_extra_t<iterate_extra_t>, wrappy_fun_iterate<iterate_extra_t, f> >(left, right, &wrapped_extra); return m_buffer.iterate_on_range< klpair_iterate_extra<iterate_extra_t>, klpair_iterate_wrapper<iterate_extra_t, f> >(left, right, &klpair_extra);
} }
// find_zero() on key dmt
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const DBT &, const dmtcmp_t &)> int (*h)(const DBT &, const dmtcmp_t &)>
int find_zero(const dmtcmp_t &extra, LEAFENTRY *const value, void** key, uint32_t* keylen, uint32_t *const idxp) const { int find_zero(const dmtcmp_t &extra, LEAFENTRY *const value, void** key, uint32_t* keylen, uint32_t *const idxp) const {
KLPAIR klpair = NULL; klpair_struct* klpair = nullptr;
uint32_t klpair_len; uint32_t klpair_len;
int r = m_buffer.find_zero< dmtcmp_t, wrappy_fun_find<dmtcmp_t, h> >(extra, &klpair_len, &klpair, idxp); int r = m_buffer.find_zero< dmtcmp_t, klpair_find_wrapper<dmtcmp_t, h> >(extra, &klpair_len, &klpair, idxp);
if (r == 0) { if (r == 0) {
if (value) { if (value) {
*value = get_le_from_klpair(klpair); *value = get_le_from_klpair(klpair);
} }
if (key) { if (key) {
paranoid_invariant(keylen != NULL); paranoid_invariant_notnull(keylen);
*key = klpair->key; *key = klpair->key;
*keylen = keylen_from_klpair_len(klpair_len); *keylen = keylen_from_klpair_len(klpair_len);
} }
...@@ -218,45 +233,56 @@ class bn_data { ...@@ -218,45 +233,56 @@ class bn_data {
return r; return r;
} }
// find() on key dmt (and associated leafentries)
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const DBT &, const dmtcmp_t &)> int (*h)(const DBT &, const dmtcmp_t &)>
int find(const dmtcmp_t &extra, int direction, LEAFENTRY *const value, void** key, uint32_t* keylen, uint32_t *const idxp) const { int find(const dmtcmp_t &extra, int direction, LEAFENTRY *const value, void** key, uint32_t* keylen, uint32_t *const idxp) const {
KLPAIR klpair = NULL; klpair_struct* klpair = nullptr;
uint32_t klpair_len; uint32_t klpair_len;
int r = m_buffer.find< dmtcmp_t, wrappy_fun_find<dmtcmp_t, h> >(extra, direction, &klpair_len, &klpair, idxp); int r = m_buffer.find< dmtcmp_t, klpair_find_wrapper<dmtcmp_t, h> >(extra, direction, &klpair_len, &klpair, idxp);
if (r == 0) { if (r == 0) {
if (value) { if (value) {
*value = get_le_from_klpair(klpair); *value = get_le_from_klpair(klpair);
} }
if (key) { if (key) {
paranoid_invariant(keylen != NULL); paranoid_invariant_notnull(keylen);
*key = klpair->key; *key = klpair->key;
*keylen = keylen_from_klpair_len(klpair_len); *keylen = keylen_from_klpair_len(klpair_len);
} }
else { else {
paranoid_invariant(keylen == NULL); paranoid_invariant_null(keylen);
} }
} }
return r; return r;
} }
// get info about a single leafentry by index // Fetch leafentry by index
__attribute__((__nonnull__))
int fetch_le(uint32_t idx, LEAFENTRY *le); int fetch_le(uint32_t idx, LEAFENTRY *le);
// Fetch (leafentry, key, keylen) by index
__attribute__((__nonnull__))
int fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key); int fetch_klpair(uint32_t idx, LEAFENTRY *le, uint32_t *len, void** key);
// Fetch (serialized size of leafentry, key, and keylen) by index
__attribute__((__nonnull__))
int fetch_klpair_disksize(uint32_t idx, size_t *size); int fetch_klpair_disksize(uint32_t idx, size_t *size);
int fetch_le_key_and_len(uint32_t idx, uint32_t *len, void** key); // Fetch (key, keylen) by index
__attribute__((__nonnull__))
// Interact with another bn_data int fetch_key_and_len(uint32_t idx, uint32_t *len, void** key);
void move_leafentries_to(BN_DATA dest_bd,
// Move leafentries (and associated key/keylens) from this basement node to dest_bd
// Moves indexes [lbi-ube)
__attribute__((__nonnull__))
void move_leafentries_to(bn_data* dest_bd,
uint32_t lbi, //lower bound inclusive uint32_t lbi, //lower bound inclusive
uint32_t ube //upper bound exclusive uint32_t ube //upper bound exclusive
); );
// Destroy this basement node and free memory.
void destroy(void); void destroy(void);
// Replaces contents, into brand new mempool. // Uses sorted array as input for this basement node.
// Returns old mempool base, expects caller to free it. // Expects this to be a basement node just initialized with initialize_empty()
void replace_contents_with_clone_of_sorted_array( void set_contents_as_clone_of_sorted_array(
uint32_t num_les, uint32_t num_les,
const void** old_key_ptrs, const void** old_key_ptrs,
uint32_t* old_keylens, uint32_t* old_keylens,
...@@ -266,32 +292,46 @@ class bn_data { ...@@ -266,32 +292,46 @@ class bn_data {
size_t total_le_size size_t total_le_size
); );
// Make this basement node a clone of orig_bn_data.
// orig_bn_data still owns all its memory (dmt, mempool)
// this basement node will have a new dmt, mempool containing same data.
void clone(bn_data* orig_bn_data); void clone(bn_data* orig_bn_data);
// Delete klpair index idx with provided keylen and old leafentry with size old_le_size
void delete_leafentry ( void delete_leafentry (
uint32_t idx, uint32_t idx,
uint32_t keylen, uint32_t keylen,
uint32_t old_le_size uint32_t old_le_size
); );
// Allocates space in the mempool to store a new leafentry.
// This may require reorganizing the mempool and updating the dmt.
void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_size, uint32_t new_size, LEAFENTRY* new_le_space); void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_size, uint32_t new_size, LEAFENTRY* new_le_space);
// Allocates space in the mempool to store a new leafentry
// and inserts a new key into the dmt
// This may require reorganizing the mempool and updating the dmt.
void get_space_for_insert(uint32_t idx, const void* keyp, uint32_t keylen, size_t size, LEAFENTRY* new_le_space); void get_space_for_insert(uint32_t idx, const void* keyp, uint32_t keylen, size_t size, LEAFENTRY* new_le_space);
// Gets a leafentry given a klpair from this basement node.
LEAFENTRY get_le_from_klpair(const klpair_struct *klpair) const; LEAFENTRY get_le_from_klpair(const klpair_struct *klpair) const;
// Prepares this basement node for serialization.
// Must be called before serializing this basement node. // Must be called before serializing this basement node.
// Between calling prepare_to_serialize and actually serializing, the basement node may not be modified // Between calling prepare_to_serialize and actually serializing, the basement node may not be modified
void prepare_to_serialize(void); void prepare_to_serialize(void);
// Requires prepare_to_serialize() to have been called first.
// Serialize the basement node header to a wbuf // Serialize the basement node header to a wbuf
// Requires prepare_to_serialize() to have been called first.
void serialize_header(struct wbuf *wb) const; void serialize_header(struct wbuf *wb) const;
// Requires prepare_to_serialize() (and serialize_header()) has been called first.
// Serialize all keys and leafentries to a wbuf // Serialize all keys and leafentries to a wbuf
// Requires prepare_to_serialize() (and serialize_header()) has been called first.
// Currently only supported when all keys are fixed-length. // Currently only supported when all keys are fixed-length.
void serialize_rest(struct wbuf *wb) const; void serialize_rest(struct wbuf *wb) const;
// Requires prepare_to_serialize() to have been called first.
// Returns true if we must use the old (version 24) serialization method for this basement node // Returns true if we must use the old (version 24) serialization method for this basement node
// Requires prepare_to_serialize() to have been called first.
// In other words, the bndata does not know how to serialize the keys and leafentries. // In other words, the bndata does not know how to serialize the keys and leafentries.
bool need_to_serialize_each_leafentry_with_key(void) const; bool need_to_serialize_each_leafentry_with_key(void) const;
...@@ -304,25 +344,40 @@ class bn_data { ...@@ -304,25 +344,40 @@ class bn_data {
+ 0; + 0;
private: private:
// Private functions // Allocates space in the mempool.
LEAFENTRY mempool_malloc_and_update_omt(size_t size, void **maybe_free); // If there is insufficient space, the mempool is enlarged and leafentries may be shuffled to reduce fragmentation.
void omt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress); // If shuffling happens, the offsets stored in the dmt are updated.
LEAFENTRY mempool_malloc_and_update_dmt(size_t size, void **maybe_free);
// Maintain metadata about size of memory for keys (adding a single key) // Change the size of the mempool to support what is already in it, plus added_size.
// possibly "compress" by shuffling leafentries around to reduce fragmentation to 0.
// If fragmentation is already 0 and force_compress is not true, shuffling may be skipped.
// If shuffling happens, leafentries will be stored in the mempool in sorted order.
void dmt_compress_kvspace(size_t added_size, void **maybe_free, bool force_compress);
// Note that a key was added (for maintaining disk-size of this basement node)
void add_key(uint32_t keylen); void add_key(uint32_t keylen);
// Maintain metadata about size of memory for keys (adding multiple keys)
// Note that multiple keys were added (for maintaining disk-size of this basement node)
void add_keys(uint32_t n_keys, uint32_t combined_keylen); void add_keys(uint32_t n_keys, uint32_t combined_keylen);
// Maintain metadata about size of memory for keys (removing a single key)
// Note that a key was removed (for maintaining disk-size of this basement node)
void remove_key(uint32_t keylen); void remove_key(uint32_t keylen);
klpair_dmt_t m_buffer; // pointers to individual leaf entries klpair_dmt_t m_buffer; // pointers to individual leaf entries
struct mempool m_buffer_mempool; // storage for all leaf entries struct mempool m_buffer_mempool; // storage for all leaf entries
friend class bndata_bugfix_test; friend class bndata_bugfix_test;
// Get the serialized size of a klpair.
// As of Jan 14, 2014, serialized size of a klpair is independent of if this basement node has fixed-length keys.
uint32_t klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const; uint32_t klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const;
// The disk/memory size of all keys. (Note that the size of memory for the leafentries is maintained by m_buffer_mempool) // The disk/memory size of all keys. (Note that the size of memory for the leafentries is maintained by m_buffer_mempool)
size_t m_disksize_of_keys; size_t m_disksize_of_keys;
// Deserialize this basement node from rbuf
// all keys will be first followed by all leafentries (both in sorted order)
void initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version, void initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version,
uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length, uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length,
uint32_t fixed_key_length); uint32_t fixed_key_length);
......
...@@ -97,28 +97,6 @@ PATENT RIGHTS GRANT: ...@@ -97,28 +97,6 @@ PATENT RIGHTS GRANT:
#include <util/mempool.h> #include <util/mempool.h>
#include "dmt-wrapper.h" #include "dmt-wrapper.h"
namespace toku {
template<>
class dmt_functor<DMTVALUE> {
public:
size_t get_dmtdatain_t_size(void) const {
return sizeof(DMTVALUE);
}
void write_dmtdata_t_to(DMTVALUE *const dest) const {
*dest = value;
}
dmt_functor(DMTVALUE _value)
: value(_value) {}
dmt_functor(const uint32_t size UU(), DMTVALUE *const src)
: value(*src) {
paranoid_invariant(size == sizeof(DMTVALUE));
}
private:
const DMTVALUE value;
};
}
int int
toku_dmt_create_steal_sorted_array(DMT *dmtp, DMTVALUE **valuesp, uint32_t numvalues, uint32_t capacity) { toku_dmt_create_steal_sorted_array(DMT *dmtp, DMTVALUE **valuesp, uint32_t numvalues, uint32_t capacity) {
//TODO: implement using create_steal_sorted_array when it exists //TODO: implement using create_steal_sorted_array when it exists
...@@ -167,7 +145,7 @@ int toku_dmt_create_from_sorted_array(DMT *dmtp, DMTVALUE *values, uint32_t numv ...@@ -167,7 +145,7 @@ int toku_dmt_create_from_sorted_array(DMT *dmtp, DMTVALUE *values, uint32_t numv
} }
int toku_dmt_insert_at(DMT dmt, DMTVALUE value, uint32_t index) { int toku_dmt_insert_at(DMT dmt, DMTVALUE value, uint32_t index) {
toku::dmt_functor<DMTVALUE> functor(value); dmt_wrapper_internal::dmtvalue_writer functor(value);
return dmt->insert_at(functor, index); return dmt->insert_at(functor, index);
} }
...@@ -222,7 +200,7 @@ int call_heftor(const uint32_t size, const DMTVALUE &v, const heftor &htor) { ...@@ -222,7 +200,7 @@ int call_heftor(const uint32_t size, const DMTVALUE &v, const heftor &htor) {
int toku_dmt_insert(DMT dmt, DMTVALUE value, int(*h)(DMTVALUE, void*v), void *v, uint32_t *index) { int toku_dmt_insert(DMT dmt, DMTVALUE value, int(*h)(DMTVALUE, void*v), void *v, uint32_t *index) {
struct heftor htor = { .h = h, .v = v }; struct heftor htor = { .h = h, .v = v };
toku::dmt_functor<DMTVALUE> functor(value); dmt_wrapper_internal::dmtvalue_writer functor(value);
return dmt->insert<heftor, call_heftor>(functor, htor, index); return dmt->insert<heftor, call_heftor>(functor, htor, index);
} }
......
...@@ -143,8 +143,32 @@ PATENT RIGHTS GRANT: ...@@ -143,8 +143,32 @@ PATENT RIGHTS GRANT:
//typedef struct value *DMTVALUE; // A slight improvement over using void*. //typedef struct value *DMTVALUE; // A slight improvement over using void*.
#include <util/dmt.h> #include <util/dmt.h>
typedef void *DMTVALUE; typedef void *DMTVALUE;
typedef toku::dmt<DMTVALUE> *DMT;
namespace dmt_wrapper_internal {
class dmtvalue_writer {
public:
size_t get_size(void) const {
return sizeof(DMTVALUE);
}
void write_to(DMTVALUE *const dest) const {
*dest = value;
}
dmtvalue_writer(DMTVALUE _value)
: value(_value) {}
dmtvalue_writer(const uint32_t size UU(), DMTVALUE *const src)
: value(*src) {
paranoid_invariant(size == sizeof(DMTVALUE));
}
private:
const DMTVALUE value;
};
};
typedef toku::dmt<DMTVALUE, DMTVALUE, dmt_wrapper_internal::dmtvalue_writer> *DMT;
int toku_dmt_create (DMT *dmtp); int toku_dmt_create (DMT *dmtp);
......
...@@ -689,16 +689,16 @@ ftleaf_get_split_loc( ...@@ -689,16 +689,16 @@ ftleaf_get_split_loc(
switch (split_mode) { switch (split_mode) {
case SPLIT_LEFT_HEAVY: { case SPLIT_LEFT_HEAVY: {
*num_left_bns = node->n_children; *num_left_bns = node->n_children;
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->omt_size(); *num_left_les = BLB_DATA(node, *num_left_bns - 1)->dmt_size();
if (*num_left_les == 0) { if (*num_left_les == 0) {
*num_left_bns = node->n_children - 1; *num_left_bns = node->n_children - 1;
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->omt_size(); *num_left_les = BLB_DATA(node, *num_left_bns - 1)->dmt_size();
} }
goto exit; goto exit;
} }
case SPLIT_RIGHT_HEAVY: { case SPLIT_RIGHT_HEAVY: {
*num_left_bns = 1; *num_left_bns = 1;
*num_left_les = BLB_DATA(node, 0)->omt_size() ? 1 : 0; *num_left_les = BLB_DATA(node, 0)->dmt_size() ? 1 : 0;
goto exit; goto exit;
} }
case SPLIT_EVENLY: { case SPLIT_EVENLY: {
...@@ -707,8 +707,8 @@ ftleaf_get_split_loc( ...@@ -707,8 +707,8 @@ ftleaf_get_split_loc(
uint64_t sumlesizes = ftleaf_disk_size(node); uint64_t sumlesizes = ftleaf_disk_size(node);
uint32_t size_so_far = 0; uint32_t size_so_far = 0;
for (int i = 0; i < node->n_children; i++) { for (int i = 0; i < node->n_children; i++) {
BN_DATA bd = BLB_DATA(node, i); bn_data* bd = BLB_DATA(node, i);
uint32_t n_leafentries = bd->omt_size(); uint32_t n_leafentries = bd->dmt_size();
for (uint32_t j=0; j < n_leafentries; j++) { for (uint32_t j=0; j < n_leafentries; j++) {
size_t size_this_le; size_t size_this_le;
int rr = bd->fetch_klpair_disksize(j, &size_this_le); int rr = bd->fetch_klpair_disksize(j, &size_this_le);
...@@ -725,7 +725,7 @@ ftleaf_get_split_loc( ...@@ -725,7 +725,7 @@ ftleaf_get_split_loc(
(*num_left_les)--; (*num_left_les)--;
} else if (*num_left_bns > 1) { } else if (*num_left_bns > 1) {
(*num_left_bns)--; (*num_left_bns)--;
*num_left_les = BLB_DATA(node, *num_left_bns - 1)->omt_size(); *num_left_les = BLB_DATA(node, *num_left_bns - 1)->dmt_size();
} else { } else {
// we are trying to split a leaf with only one // we are trying to split a leaf with only one
// leafentry in it // leafentry in it
...@@ -851,7 +851,7 @@ ftleaf_split( ...@@ -851,7 +851,7 @@ ftleaf_split(
ftleaf_get_split_loc(node, split_mode, &num_left_bns, &num_left_les); ftleaf_get_split_loc(node, split_mode, &num_left_bns, &num_left_les);
{ {
// did we split right on the boundary between basement nodes? // did we split right on the boundary between basement nodes?
const bool split_on_boundary = (num_left_les == 0) || (num_left_les == (int) BLB_DATA(node, num_left_bns - 1)->omt_size()); const bool split_on_boundary = (num_left_les == 0) || (num_left_les == (int) BLB_DATA(node, num_left_bns - 1)->dmt_size());
// Now we know where we are going to break it // Now we know where we are going to break it
// the two nodes will have a total of n_children+1 basement nodes // the two nodes will have a total of n_children+1 basement nodes
// and n_children-1 pivots // and n_children-1 pivots
...@@ -912,7 +912,7 @@ ftleaf_split( ...@@ -912,7 +912,7 @@ ftleaf_split(
move_leafentries(BLB(B, curr_dest_bn_index), move_leafentries(BLB(B, curr_dest_bn_index),
BLB(node, curr_src_bn_index), BLB(node, curr_src_bn_index),
num_left_les, // first row to be moved to B num_left_les, // first row to be moved to B
BLB_DATA(node, curr_src_bn_index)->omt_size() // number of rows in basement to be split BLB_DATA(node, curr_src_bn_index)->dmt_size() // number of rows in basement to be split
); );
BLB_MAX_MSN_APPLIED(B, curr_dest_bn_index) = BLB_MAX_MSN_APPLIED(node, curr_src_bn_index); BLB_MAX_MSN_APPLIED(B, curr_dest_bn_index) = BLB_MAX_MSN_APPLIED(node, curr_src_bn_index);
curr_dest_bn_index++; curr_dest_bn_index++;
...@@ -954,10 +954,10 @@ ftleaf_split( ...@@ -954,10 +954,10 @@ ftleaf_split(
toku_destroy_dbt(&node->childkeys[num_left_bns - 1]); toku_destroy_dbt(&node->childkeys[num_left_bns - 1]);
} }
} else if (splitk) { } else if (splitk) {
BN_DATA bd = BLB_DATA(node, num_left_bns - 1); bn_data* bd = BLB_DATA(node, num_left_bns - 1);
uint32_t keylen; uint32_t keylen;
void *key; void *key;
int rr = bd->fetch_le_key_and_len(bd->omt_size() - 1, &keylen, &key); int rr = bd->fetch_key_and_len(bd->dmt_size() - 1, &keylen, &key);
invariant_zero(rr); invariant_zero(rr);
toku_memdup_dbt(splitk, key, keylen); toku_memdup_dbt(splitk, key, keylen);
} }
...@@ -1168,11 +1168,11 @@ merge_leaf_nodes(FTNODE a, FTNODE b) ...@@ -1168,11 +1168,11 @@ merge_leaf_nodes(FTNODE a, FTNODE b)
a->dirty = 1; a->dirty = 1;
b->dirty = 1; b->dirty = 1;
BN_DATA a_last_bd = BLB_DATA(a, a->n_children-1); bn_data* a_last_bd = BLB_DATA(a, a->n_children-1);
// this bool states if the last basement node in a has any items or not // this bool states if the last basement node in a has any items or not
// If it does, then it stays in the merge. If it does not, the last basement node // If it does, then it stays in the merge. If it does not, the last basement node
// of a gets eliminated because we do not have a pivot to store for it (because it has no elements) // of a gets eliminated because we do not have a pivot to store for it (because it has no elements)
const bool a_has_tail = a_last_bd->omt_size() > 0; const bool a_has_tail = a_last_bd->dmt_size() > 0;
// move each basement node from b to a // move each basement node from b to a
// move the pivots, adding one of what used to be max(a) // move the pivots, adding one of what used to be max(a)
...@@ -1199,7 +1199,7 @@ merge_leaf_nodes(FTNODE a, FTNODE b) ...@@ -1199,7 +1199,7 @@ merge_leaf_nodes(FTNODE a, FTNODE b)
if (a_has_tail) { if (a_has_tail) {
uint32_t keylen; uint32_t keylen;
void *key; void *key;
int rr = a_last_bd->fetch_le_key_and_len(a_last_bd->omt_size() - 1, &keylen, &key); int rr = a_last_bd->fetch_key_and_len(a_last_bd->dmt_size() - 1, &keylen, &key);
invariant_zero(rr); invariant_zero(rr);
toku_memdup_dbt(&a->childkeys[a->n_children-1], key, keylen); toku_memdup_dbt(&a->childkeys[a->n_children-1], key, keylen);
a->totalchildkeylens += keylen; a->totalchildkeylens += keylen;
......
...@@ -419,7 +419,7 @@ get_leaf_num_entries(FTNODE node) { ...@@ -419,7 +419,7 @@ get_leaf_num_entries(FTNODE node) {
int i; int i;
toku_assert_entire_node_in_memory(node); toku_assert_entire_node_in_memory(node);
for ( i = 0; i < node->n_children; i++) { for ( i = 0; i < node->n_children; i++) {
result += BLB_DATA(node, i)->omt_size(); result += BLB_DATA(node, i)->dmt_size();
} }
return result; return result;
} }
...@@ -1720,7 +1720,7 @@ toku_ft_bn_apply_cmd_once ( ...@@ -1720,7 +1720,7 @@ toku_ft_bn_apply_cmd_once (
oldsize = leafentry_memsize(le) + key_storage_size; oldsize = leafentry_memsize(le) + key_storage_size;
} }
// toku_le_apply_msg() may call mempool_malloc_from_omt() to allocate more space. // toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt() to allocate more space.
// That means le is guaranteed to not cause a sigsegv but it may point to a mempool that is // That means le is guaranteed to not cause a sigsegv but it may point to a mempool that is
// no longer in use. We'll have to release the old mempool later. // no longer in use. We'll have to release the old mempool later.
toku_le_apply_msg( toku_le_apply_msg(
...@@ -1910,7 +1910,7 @@ toku_ft_bn_apply_cmd ( ...@@ -1910,7 +1910,7 @@ toku_ft_bn_apply_cmd (
void* key = NULL; void* key = NULL;
uint32_t keylen = 0; uint32_t keylen = 0;
uint32_t omt_size; uint32_t dmt_size;
int r; int r;
struct cmd_leafval_heaviside_extra be = {compare_fun, desc, cmd->u.id.key}; struct cmd_leafval_heaviside_extra be = {compare_fun, desc, cmd->u.id.key};
...@@ -1922,9 +1922,9 @@ toku_ft_bn_apply_cmd ( ...@@ -1922,9 +1922,9 @@ toku_ft_bn_apply_cmd (
case FT_INSERT: { case FT_INSERT: {
uint32_t idx; uint32_t idx;
if (doing_seqinsert) { if (doing_seqinsert) {
idx = bn->data_buffer.omt_size(); idx = bn->data_buffer.dmt_size();
DBT kdbt; DBT kdbt;
r = bn->data_buffer.fetch_le_key_and_len(idx-1, &kdbt.size, &kdbt.data); r = bn->data_buffer.fetch_key_and_len(idx-1, &kdbt.size, &kdbt.data);
if (r != 0) goto fz; if (r != 0) goto fz;
int cmp = toku_cmd_leafval_heaviside(kdbt, be); int cmp = toku_cmd_leafval_heaviside(kdbt, be);
if (cmp >= 0) goto fz; if (cmp >= 0) goto fz;
...@@ -1950,7 +1950,7 @@ toku_ft_bn_apply_cmd ( ...@@ -1950,7 +1950,7 @@ toku_ft_bn_apply_cmd (
// the leaf then it is sequential // the leaf then it is sequential
// window = min(32, number of leaf entries/16) // window = min(32, number of leaf entries/16)
{ {
uint32_t s = bn->data_buffer.omt_size(); uint32_t s = bn->data_buffer.dmt_size();
uint32_t w = s / 16; uint32_t w = s / 16;
if (w == 0) w = 1; if (w == 0) w = 1;
if (w > 32) w = 32; if (w > 32) w = 32;
...@@ -1985,8 +1985,8 @@ toku_ft_bn_apply_cmd ( ...@@ -1985,8 +1985,8 @@ toku_ft_bn_apply_cmd (
case FT_COMMIT_BROADCAST_ALL: case FT_COMMIT_BROADCAST_ALL:
case FT_OPTIMIZE: case FT_OPTIMIZE:
// Apply to all leafentries // Apply to all leafentries
omt_size = bn->data_buffer.omt_size(); dmt_size = bn->data_buffer.dmt_size();
for (uint32_t idx = 0; idx < omt_size; ) { for (uint32_t idx = 0; idx < dmt_size; ) {
DBT curr_keydbt; DBT curr_keydbt;
void* curr_keyp = NULL; void* curr_keyp = NULL;
uint32_t curr_keylen = 0; uint32_t curr_keylen = 0;
...@@ -2000,26 +2000,26 @@ toku_ft_bn_apply_cmd ( ...@@ -2000,26 +2000,26 @@ toku_ft_bn_apply_cmd (
if (!le_is_clean(storeddata)) { //If already clean, nothing to do. if (!le_is_clean(storeddata)) { //If already clean, nothing to do.
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, oldest_referenced_xid_known, gc_info, workdone, stats_to_update); toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, oldest_referenced_xid_known, gc_info, workdone, stats_to_update);
// at this point, we cannot trust cmd->u.id.key to be valid. // at this point, we cannot trust cmd->u.id.key to be valid.
uint32_t new_omt_size = bn->data_buffer.omt_size(); uint32_t new_dmt_size = bn->data_buffer.dmt_size();
if (new_omt_size != omt_size) { if (new_dmt_size != dmt_size) {
paranoid_invariant(new_omt_size+1 == omt_size); paranoid_invariant(new_dmt_size+1 == dmt_size);
//Item was deleted. //Item was deleted.
deleted = 1; deleted = 1;
} }
} }
if (deleted) if (deleted)
omt_size--; dmt_size--;
else else
idx++; idx++;
} }
paranoid_invariant(bn->data_buffer.omt_size() == omt_size); paranoid_invariant(bn->data_buffer.dmt_size() == dmt_size);
break; break;
case FT_COMMIT_BROADCAST_TXN: case FT_COMMIT_BROADCAST_TXN:
case FT_ABORT_BROADCAST_TXN: case FT_ABORT_BROADCAST_TXN:
// Apply to all leafentries if txn is represented // Apply to all leafentries if txn is represented
omt_size = bn->data_buffer.omt_size(); dmt_size = bn->data_buffer.dmt_size();
for (uint32_t idx = 0; idx < omt_size; ) { for (uint32_t idx = 0; idx < dmt_size; ) {
DBT curr_keydbt; DBT curr_keydbt;
void* curr_keyp = NULL; void* curr_keyp = NULL;
uint32_t curr_keylen = 0; uint32_t curr_keylen = 0;
...@@ -2032,19 +2032,19 @@ toku_ft_bn_apply_cmd ( ...@@ -2032,19 +2032,19 @@ toku_ft_bn_apply_cmd (
int deleted = 0; int deleted = 0;
if (le_has_xids(storeddata, cmd->xids)) { if (le_has_xids(storeddata, cmd->xids)) {
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, oldest_referenced_xid_known, gc_info, workdone, stats_to_update); toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, oldest_referenced_xid_known, gc_info, workdone, stats_to_update);
uint32_t new_omt_size = bn->data_buffer.omt_size(); uint32_t new_dmt_size = bn->data_buffer.dmt_size();
if (new_omt_size != omt_size) { if (new_dmt_size != dmt_size) {
paranoid_invariant(new_omt_size+1 == omt_size); paranoid_invariant(new_dmt_size+1 == dmt_size);
//Item was deleted. //Item was deleted.
deleted = 1; deleted = 1;
} }
} }
if (deleted) if (deleted)
omt_size--; dmt_size--;
else else
idx++; idx++;
} }
paranoid_invariant(bn->data_buffer.omt_size() == omt_size); paranoid_invariant(bn->data_buffer.dmt_size() == dmt_size);
break; break;
case FT_UPDATE: { case FT_UPDATE: {
...@@ -2073,7 +2073,7 @@ toku_ft_bn_apply_cmd ( ...@@ -2073,7 +2073,7 @@ toku_ft_bn_apply_cmd (
// apply to all leafentries. // apply to all leafentries.
uint32_t idx = 0; uint32_t idx = 0;
uint32_t num_leafentries_before; uint32_t num_leafentries_before;
while (idx < (num_leafentries_before = bn->data_buffer.omt_size())) { while (idx < (num_leafentries_before = bn->data_buffer.dmt_size())) {
void* curr_key = nullptr; void* curr_key = nullptr;
uint32_t curr_keylen = 0; uint32_t curr_keylen = 0;
r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_key); r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_key);
...@@ -2091,7 +2091,7 @@ toku_ft_bn_apply_cmd ( ...@@ -2091,7 +2091,7 @@ toku_ft_bn_apply_cmd (
r = do_update(update_fun, desc, bn, cmd, idx, storeddata, curr_key, curr_keylen, oldest_referenced_xid_known, gc_info, workdone, stats_to_update); r = do_update(update_fun, desc, bn, cmd, idx, storeddata, curr_key, curr_keylen, oldest_referenced_xid_known, gc_info, workdone, stats_to_update);
assert_zero(r); assert_zero(r);
if (num_leafentries_before == bn->data_buffer.omt_size()) { if (num_leafentries_before == bn->data_buffer.dmt_size()) {
// we didn't delete something, so increment the index. // we didn't delete something, so increment the index.
idx++; idx++;
} }
...@@ -2404,7 +2404,7 @@ basement_node_gc_all_les(BASEMENTNODE bn, ...@@ -2404,7 +2404,7 @@ basement_node_gc_all_les(BASEMENTNODE bn,
int r = 0; int r = 0;
uint32_t index = 0; uint32_t index = 0;
uint32_t num_leafentries_before; uint32_t num_leafentries_before;
while (index < (num_leafentries_before = bn->data_buffer.omt_size())) { while (index < (num_leafentries_before = bn->data_buffer.dmt_size())) {
void* keyp = NULL; void* keyp = NULL;
uint32_t keylen = 0; uint32_t keylen = 0;
LEAFENTRY leaf_entry; LEAFENTRY leaf_entry;
...@@ -2423,7 +2423,7 @@ basement_node_gc_all_les(BASEMENTNODE bn, ...@@ -2423,7 +2423,7 @@ basement_node_gc_all_les(BASEMENTNODE bn,
delta delta
); );
// Check if the leaf entry was deleted or not. // Check if the leaf entry was deleted or not.
if (num_leafentries_before == bn->data_buffer.omt_size()) { if (num_leafentries_before == bn->data_buffer.dmt_size()) {
++index; ++index;
} }
} }
...@@ -4929,7 +4929,7 @@ ok: ; ...@@ -4929,7 +4929,7 @@ ok: ;
switch (search->direction) { switch (search->direction) {
case FT_SEARCH_LEFT: case FT_SEARCH_LEFT:
idx++; idx++;
if (idx >= bn->data_buffer.omt_size()) { if (idx >= bn->data_buffer.dmt_size()) {
if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) { if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) {
return TOKUDB_INTERRUPTED; return TOKUDB_INTERRUPTED;
} }
...@@ -5604,7 +5604,7 @@ ft_cursor_shortcut ( ...@@ -5604,7 +5604,7 @@ ft_cursor_shortcut (
int r = 0; int r = 0;
// if we are searching towards the end, limit is last element // if we are searching towards the end, limit is last element
// if we are searching towards the beginning, limit is the first element // if we are searching towards the beginning, limit is the first element
uint32_t limit = (direction > 0) ? (bd->omt_size() - 1) : 0; uint32_t limit = (direction > 0) ? (bd->dmt_size() - 1) : 0;
//Starting with the prev, find the first real (non-provdel) leafentry. //Starting with the prev, find the first real (non-provdel) leafentry.
while (index != limit) { while (index != limit) {
...@@ -5895,7 +5895,7 @@ keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node, ...@@ -5895,7 +5895,7 @@ keysrange_in_leaf_partition (FT_HANDLE brt, FTNODE node,
*less = idx_left; *less = idx_left;
*equal_left = (r==0) ? 1 : 0; *equal_left = (r==0) ? 1 : 0;
uint32_t size = bn->data_buffer.omt_size(); uint32_t size = bn->data_buffer.dmt_size();
uint32_t idx_right = size; uint32_t idx_right = size;
r = -1; r = -1;
if (single_basement && key_right) { if (single_basement && key_right) {
...@@ -6155,7 +6155,7 @@ static int get_key_after_bytes_in_basementnode(FT ft, BASEMENTNODE bn, const DBT ...@@ -6155,7 +6155,7 @@ static int get_key_after_bytes_in_basementnode(FT ft, BASEMENTNODE bn, const DBT
assert(r == 0 || r == DB_NOTFOUND); assert(r == 0 || r == DB_NOTFOUND);
} }
struct get_key_after_bytes_iterate_extra iter_extra = {skip_len, skipped, callback, cb_extra}; struct get_key_after_bytes_iterate_extra iter_extra = {skip_len, skipped, callback, cb_extra};
r = bn->data_buffer.omt_iterate_on_range<get_key_after_bytes_iterate_extra, get_key_after_bytes_iterate>(idx_left, bn->data_buffer.omt_size(), &iter_extra); r = bn->data_buffer.dmt_iterate_on_range<get_key_after_bytes_iterate_extra, get_key_after_bytes_iterate>(idx_left, bn->data_buffer.dmt_size(), &iter_extra);
// Invert the sense of r == 0 (meaning the iterate finished, which means we didn't find what we wanted) // Invert the sense of r == 0 (meaning the iterate finished, which means we didn't find what we wanted)
if (r == 1) { if (r == 1) {
...@@ -6351,7 +6351,7 @@ toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const ...@@ -6351,7 +6351,7 @@ toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const
}); });
} }
else { else {
int size = BLB_DATA(node, i)->omt_size(); int size = BLB_DATA(node, i)->dmt_size();
if (0) if (0)
for (int j=0; j<size; j++) { for (int j=0; j<size; j++) {
LEAFENTRY le; LEAFENTRY le;
...@@ -6531,9 +6531,9 @@ static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) { ...@@ -6531,9 +6531,9 @@ static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
} }
return 1; return 1;
} else { } else {
// leaf: If the omt is empty, we are happy. // leaf: If the dmt is empty, we are happy.
for (int i = 0; i < node->n_children; i++) { for (int i = 0; i < node->n_children; i++) {
if (BLB_DATA(node, i)->omt_size()) { if (BLB_DATA(node, i)->dmt_size()) {
return false; return false;
} }
} }
......
...@@ -152,7 +152,7 @@ verify_msg_in_child_buffer(FT_HANDLE brt, enum ft_msg_type type, MSN msn, byteve ...@@ -152,7 +152,7 @@ verify_msg_in_child_buffer(FT_HANDLE brt, enum ft_msg_type type, MSN msn, byteve
static DBT static DBT
get_ith_key_dbt (BASEMENTNODE bn, int i) { get_ith_key_dbt (BASEMENTNODE bn, int i) {
DBT kdbt; DBT kdbt;
int r = bn->data_buffer.fetch_le_key_and_len(i, &kdbt.size, &kdbt.data); int r = bn->data_buffer.fetch_key_and_len(i, &kdbt.size, &kdbt.data);
invariant_zero(r); // this is a bad failure if it happens. invariant_zero(r); // this is a bad failure if it happens.
return kdbt; return kdbt;
} }
...@@ -424,7 +424,7 @@ toku_verify_ftnode_internal(FT_HANDLE brt, ...@@ -424,7 +424,7 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
} }
else { else {
BASEMENTNODE bn = BLB(node, i); BASEMENTNODE bn = BLB(node, i);
for (uint32_t j = 0; j < bn->data_buffer.omt_size(); j++) { for (uint32_t j = 0; j < bn->data_buffer.dmt_size(); j++) {
VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn"); VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn");
DBT kdbt = get_ith_key_dbt(bn, j); DBT kdbt = get_ith_key_dbt(bn, j);
if (curr_less_pivot) { if (curr_less_pivot) {
......
...@@ -1077,8 +1077,8 @@ garbage_helper(BLOCKNUM blocknum, int64_t UU(size), int64_t UU(address), void *e ...@@ -1077,8 +1077,8 @@ garbage_helper(BLOCKNUM blocknum, int64_t UU(size), int64_t UU(address), void *e
goto exit; goto exit;
} }
for (int i = 0; i < node->n_children; ++i) { for (int i = 0; i < node->n_children; ++i) {
BN_DATA bd = BLB_DATA(node, i); bn_data* bd = BLB_DATA(node, i);
r = bd->omt_iterate<struct garbage_helper_extra, garbage_leafentry_helper>(info); r = bd->dmt_iterate<struct garbage_helper_extra, garbage_leafentry_helper>(info);
if (r != 0) { if (r != 0) {
goto exit; goto exit;
} }
......
...@@ -375,10 +375,10 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { ...@@ -375,10 +375,10 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
} }
else { else {
unsigned char ch = FTNODE_PARTITION_OMT_LEAVES; unsigned char ch = FTNODE_PARTITION_OMT_LEAVES;
BN_DATA bd = BLB_DATA(node, i); bn_data* bd = BLB_DATA(node, i);
wbuf_nocrc_char(&wb, ch); wbuf_nocrc_char(&wb, ch);
wbuf_nocrc_uint(&wb, bd->omt_size()); wbuf_nocrc_uint(&wb, bd->dmt_size());
bd->prepare_to_serialize(); bd->prepare_to_serialize();
bd->serialize_header(&wb); bd->serialize_header(&wb);
...@@ -386,7 +386,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { ...@@ -386,7 +386,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
// //
// iterate over leafentries and place them into the buffer // iterate over leafentries and place them into the buffer
// //
bd->omt_iterate<struct wbuf, wbufwriteleafentry>(&wb); bd->dmt_iterate<struct wbuf, wbufwriteleafentry>(&wb);
} else { } else {
bd->serialize_rest(&wb); bd->serialize_rest(&wb);
} }
...@@ -552,7 +552,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize) ...@@ -552,7 +552,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Count number of leaf entries in this leaf (num_le). // Count number of leaf entries in this leaf (num_le).
uint32_t num_le = 0; uint32_t num_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) { for (uint32_t i = 0; i < num_orig_basements; i++) {
num_le += BLB_DATA(node, i)->omt_size(); num_le += BLB_DATA(node, i)->dmt_size();
} }
uint32_t num_alloc = num_le ? num_le : 1; // simplify logic below by always having at least one entry per array uint32_t num_alloc = num_le ? num_le : 1; // simplify logic below by always having at least one entry per array
...@@ -577,10 +577,10 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize) ...@@ -577,10 +577,10 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
uint32_t curr_le = 0; uint32_t curr_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) { for (uint32_t i = 0; i < num_orig_basements; i++) {
BN_DATA bd = BLB_DATA(node, i); bn_data* bd = BLB_DATA(node, i);
struct array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers }; struct array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers };
bd->omt_iterate<array_info, array_item>(&ai); bd->dmt_iterate<array_info, array_item>(&ai);
curr_le += bd->omt_size(); curr_le += bd->dmt_size();
} }
// Create an array that will store indexes of new pivots. // Create an array that will store indexes of new pivots.
...@@ -702,8 +702,8 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize) ...@@ -702,8 +702,8 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
uint32_t num_les_to_copy = num_les_this_bn[i]; uint32_t num_les_to_copy = num_les_this_bn[i];
invariant(num_les_to_copy == num_in_bn); invariant(num_les_to_copy == num_in_bn);
BN_DATA bd = BLB_DATA(node, i); bn_data* bd = BLB_DATA(node, i);
bd->replace_contents_with_clone_of_sorted_array( bd->set_contents_as_clone_of_sorted_array(
num_les_to_copy, num_les_to_copy,
&key_pointers[baseindex_this_bn], &key_pointers[baseindex_this_bn],
&key_sizes[baseindex_this_bn], &key_sizes[baseindex_this_bn],
...@@ -1560,7 +1560,7 @@ deserialize_ftnode_partition( ...@@ -1560,7 +1560,7 @@ deserialize_ftnode_partition(
data_size -= rb.ndone; // remaining bytes of leafentry data data_size -= rb.ndone; // remaining bytes of leafentry data
BASEMENTNODE bn = BLB(node, childnum); BASEMENTNODE bn = BLB(node, childnum);
bn->data_buffer.initialize_from_data(num_entries, &rb, data_size, node->layout_version_read_from_disk); bn->data_buffer.deserialize_from_rbuf(num_entries, &rb, data_size, node->layout_version_read_from_disk);
} }
assert(rb.ndone == rb.size); assert(rb.ndone == rb.size);
exit: exit:
...@@ -2112,7 +2112,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node, ...@@ -2112,7 +2112,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node,
if (has_end_to_end_checksum) { if (has_end_to_end_checksum) {
data_size -= sizeof(uint32_t); data_size -= sizeof(uint32_t);
} }
bn->data_buffer.initialize_from_data(n_in_buf, rb, data_size, node->layout_version_read_from_disk); bn->data_buffer.deserialize_from_rbuf(n_in_buf, rb, data_size, node->layout_version_read_from_disk);
} }
// Whatever this is must be less than the MSNs of every message above // Whatever this is must be less than the MSNs of every message above
......
...@@ -2917,7 +2917,7 @@ static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int ...@@ -2917,7 +2917,7 @@ static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int
// #3588 TODO just make a clean ule and append it to the omt // #3588 TODO just make a clean ule and append it to the omt
// #3588 TODO can do the rebalancing here and avoid a lot of work later // #3588 TODO can do the rebalancing here and avoid a lot of work later
FTNODE leafnode = lbuf->node; FTNODE leafnode = lbuf->node;
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
DBT thekey = { .data = key, .size = (uint32_t) keylen }; DBT thekey = { .data = key, .size = (uint32_t) keylen };
DBT theval = { .data = val, .size = (uint32_t) vallen }; DBT theval = { .data = val, .size = (uint32_t) vallen };
FT_MSG_S cmd = { .type = FT_INSERT, FT_MSG_S cmd = { .type = FT_INSERT,
......
...@@ -234,7 +234,7 @@ typedef struct cachetable *CACHETABLE; ...@@ -234,7 +234,7 @@ typedef struct cachetable *CACHETABLE;
typedef struct cachefile *CACHEFILE; typedef struct cachefile *CACHEFILE;
typedef struct ctpair *PAIR; typedef struct ctpair *PAIR;
typedef class checkpointer *CHECKPOINTER; typedef class checkpointer *CHECKPOINTER;
typedef class bn_data *BN_DATA; class bn_data;
/* tree command types */ /* tree command types */
enum ft_msg_type { enum ft_msg_type {
......
...@@ -131,27 +131,26 @@ struct val_type { ...@@ -131,27 +131,26 @@ struct val_type {
}; };
namespace toku { namespace toku {
template<> class vwriter {
class dmt_functor<val_type> {
public: public:
size_t get_dmtdatain_t_size(void) const { size_t get_size(void) const {
size_t len = strlen(v.c); size_t len = strlen(v.c);
invariant(len < sizeof(val_type)); invariant(len < sizeof(val_type));
return len + 1; return len + 1;
} }
void write_dmtdata_t_to(val_type *const dest) const { void write_to(val_type *const dest) const {
strcpy(dest->c, v.c); strcpy(dest->c, v.c);
} }
dmt_functor(const char* c) { vwriter(const char* c) {
invariant(strlen(c) < sizeof(val_type)); invariant(strlen(c) < sizeof(val_type));
strcpy(v.c, c); strcpy(v.c, c);
} }
dmt_functor(const uint32_t klpair_len, val_type *const src) { vwriter(const uint32_t klpair_len, val_type *const src) {
invariant(strlen(src->c) < sizeof(val_type)); invariant(strlen(src->c) < sizeof(val_type));
strcpy(v.c, src->c); strcpy(v.c, src->c);
invariant(klpair_len == get_dmtdatain_t_size()); invariant(klpair_len == get_size());
} }
private: private:
val_type v; val_type v;
...@@ -159,8 +158,7 @@ class dmt_functor<val_type> { ...@@ -159,8 +158,7 @@ class dmt_functor<val_type> {
} }
/* Globals */ /* Globals */
typedef toku::dmt<val_type, val_type*> vdmt; typedef toku::dmt<val_type, val_type*, toku::vwriter> vdmt;
typedef toku::dmt_functor<val_type> vfunctor;
const unsigned int random_seed = 0xFEADACBA; const unsigned int random_seed = 0xFEADACBA;
...@@ -211,7 +209,7 @@ static void test_builder_fixed(uint32_t len, uint32_t num) { ...@@ -211,7 +209,7 @@ static void test_builder_fixed(uint32_t len, uint32_t num) {
builder.create(num, num * len); builder.create(num, num * len);
for (uint32_t i = 0; i < num; i++) { for (uint32_t i = 0; i < num; i++) {
vfunctor vfun(data[i]); vwriter vfun(data[i]);
builder.append(vfun); builder.append(vfun);
} }
invariant(builder.value_length_is_fixed()); invariant(builder.value_length_is_fixed());
...@@ -230,7 +228,7 @@ static void test_builder_fixed(uint32_t len, uint32_t num) { ...@@ -230,7 +228,7 @@ static void test_builder_fixed(uint32_t len, uint32_t num) {
v2.delete_at(change); v2.delete_at(change);
fail_one_verify(len, num, &v2); fail_one_verify(len, num, &v2);
vfunctor vfun(data[change]); vwriter vfun(data[change]);
v2.insert_at(vfun, change); v2.insert_at(vfun, change);
verify(len, num, &v2); verify(len, num, &v2);
v2.destroy(); v2.destroy();
...@@ -258,7 +256,7 @@ static void test_builder_variable(uint32_t len, uint32_t len2, uint32_t num) { ...@@ -258,7 +256,7 @@ static void test_builder_variable(uint32_t len, uint32_t len2, uint32_t num) {
builder.create(num, (num-1) * len + len2); builder.create(num, (num-1) * len + len2);
for (uint32_t i = 0; i < num; i++) { for (uint32_t i = 0; i < num; i++) {
vfunctor vfun(data[i]); vwriter vfun(data[i]);
builder.append(vfun); builder.append(vfun);
} }
invariant(!builder.value_length_is_fixed()); invariant(!builder.value_length_is_fixed());
......
...@@ -357,7 +357,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { ...@@ -357,7 +357,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
if (bn > 0) { if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size); assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
} }
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) { for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le; LEAFENTRY curr_le;
uint32_t curr_keylen; uint32_t curr_keylen;
void* curr_key; void* curr_key;
...@@ -431,7 +431,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone ...@@ -431,7 +431,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
if (i < nrows-1) { if (i < nrows-1) {
uint32_t keylen; uint32_t keylen;
void* curr_key; void* curr_key;
BLB_DATA(&sn, i)->fetch_le_key_and_len(0, &keylen, &curr_key); BLB_DATA(&sn, i)->fetch_key_and_len(0, &keylen, &curr_key);
toku_memdup_dbt(&sn.childkeys[i], curr_key, keylen); toku_memdup_dbt(&sn.childkeys[i], curr_key, keylen);
} }
} }
...@@ -499,8 +499,8 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone ...@@ -499,8 +499,8 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
if (bn > 0) { if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size); assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
} }
assert(BLB_DATA(dn, bn)->omt_size() > 0); assert(BLB_DATA(dn, bn)->dmt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) { for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le; LEAFENTRY curr_le;
uint32_t curr_keylen; uint32_t curr_keylen;
void* curr_key; void* curr_key;
...@@ -631,8 +631,8 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { ...@@ -631,8 +631,8 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
if (bn > 0) { if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size); assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
} }
assert(BLB_DATA(dn, bn)->omt_size() > 0); assert(BLB_DATA(dn, bn)->dmt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) { for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le; LEAFENTRY curr_le;
uint32_t curr_keylen; uint32_t curr_keylen;
void* curr_key; void* curr_key;
...@@ -781,8 +781,8 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) ...@@ -781,8 +781,8 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
if (bn > 0) { if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size); assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
} }
assert(BLB_DATA(dn, bn)->omt_size() > 0); assert(BLB_DATA(dn, bn)->dmt_size() > 0);
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) { for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le; LEAFENTRY curr_le;
uint32_t curr_keylen; uint32_t curr_keylen;
void* curr_key; void* curr_key;
...@@ -919,7 +919,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool ...@@ -919,7 +919,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
if (bn > 0) { if (bn > 0) {
assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size); assert(dest_ndd[bn].start >= dest_ndd[bn-1].start + dest_ndd[bn-1].size);
} }
for (uint32_t i = 0; i < BLB_DATA(dn, bn)->omt_size(); i++) { for (uint32_t i = 0; i < BLB_DATA(dn, bn)->dmt_size(); i++) {
LEAFENTRY curr_le; LEAFENTRY curr_le;
uint32_t curr_keylen; uint32_t curr_keylen;
void* curr_key; void* curr_key;
...@@ -1040,7 +1040,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b ...@@ -1040,7 +1040,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
if (i > 0) { if (i > 0) {
assert(dest_ndd[i].start >= dest_ndd[i-1].start + dest_ndd[i-1].size); assert(dest_ndd[i].start >= dest_ndd[i-1].start + dest_ndd[i-1].size);
} }
assert(BLB_DATA(dn, i)->omt_size() == 0); assert(BLB_DATA(dn, i)->dmt_size() == 0);
} }
} }
toku_ftnode_free(&dn); toku_ftnode_free(&dn);
......
...@@ -119,7 +119,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -119,7 +119,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -733,7 +733,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { ...@@ -733,7 +733,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
int total_messages = 0; int total_messages = 0;
for (i = 0; i < 8; ++i) { for (i = 0; i < 8; ++i) {
total_messages += BLB_DATA(child, i)->omt_size(); total_messages += BLB_DATA(child, i)->dmt_size();
} }
assert(total_messages <= num_parent_messages + num_child_messages); assert(total_messages <= num_parent_messages + num_child_messages);
...@@ -746,7 +746,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { ...@@ -746,7 +746,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
memset(parent_messages_present, 0, sizeof parent_messages_present); memset(parent_messages_present, 0, sizeof parent_messages_present);
memset(child_messages_present, 0, sizeof child_messages_present); memset(child_messages_present, 0, sizeof child_messages_present);
for (int j = 0; j < 8; ++j) { for (int j = 0; j < 8; ++j) {
uint32_t len = BLB_DATA(child, j)->omt_size(); uint32_t len = BLB_DATA(child, j)->dmt_size();
for (uint32_t idx = 0; idx < len; ++idx) { for (uint32_t idx = 0; idx < len; ++idx) {
LEAFENTRY le; LEAFENTRY le;
DBT keydbt, valdbt; DBT keydbt, valdbt;
...@@ -968,7 +968,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { ...@@ -968,7 +968,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
int total_messages = 0; int total_messages = 0;
for (i = 0; i < 8; ++i) { for (i = 0; i < 8; ++i) {
total_messages += BLB_DATA(child, i)->omt_size(); total_messages += BLB_DATA(child, i)->dmt_size();
} }
assert(total_messages <= num_parent_messages + num_child_messages); assert(total_messages <= num_parent_messages + num_child_messages);
...@@ -1144,10 +1144,10 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { ...@@ -1144,10 +1144,10 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
toku_ftnode_free(&parentnode); toku_ftnode_free(&parentnode);
for (int j = 0; j < 8; ++j) { for (int j = 0; j < 8; ++j) {
BN_DATA first = BLB_DATA(child1, j); bn_data* first = BLB_DATA(child1, j);
BN_DATA second = BLB_DATA(child2, j); bn_data* second = BLB_DATA(child2, j);
uint32_t len = first->omt_size(); uint32_t len = first->dmt_size();
assert(len == second->omt_size()); assert(len == second->dmt_size());
for (uint32_t idx = 0; idx < len; ++idx) { for (uint32_t idx = 0; idx < len; ++idx) {
LEAFENTRY le1, le2; LEAFENTRY le1, le2;
DBT key1dbt, val1dbt, key2dbt, val2dbt; DBT key1dbt, val1dbt, key2dbt, val2dbt;
......
...@@ -348,7 +348,7 @@ doit (int state) { ...@@ -348,7 +348,7 @@ doit (int state) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1); assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
toku_pin_ftnode_off_client_thread( toku_pin_ftnode_off_client_thread(
...@@ -364,7 +364,7 @@ doit (int state) { ...@@ -364,7 +364,7 @@ doit (int state) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1); assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
} }
else if (state == ft_flush_aflter_merge || state == flt_flush_before_unpin_remove) { else if (state == ft_flush_aflter_merge || state == flt_flush_before_unpin_remove) {
...@@ -381,7 +381,7 @@ doit (int state) { ...@@ -381,7 +381,7 @@ doit (int state) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2); assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
} }
else { else {
......
...@@ -359,7 +359,7 @@ doit (int state) { ...@@ -359,7 +359,7 @@ doit (int state) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2); assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
toku_pin_ftnode_off_client_thread( toku_pin_ftnode_off_client_thread(
...@@ -375,7 +375,7 @@ doit (int state) { ...@@ -375,7 +375,7 @@ doit (int state) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2); assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
......
...@@ -342,7 +342,7 @@ doit (bool after_split) { ...@@ -342,7 +342,7 @@ doit (bool after_split) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1); assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
toku_pin_ftnode_off_client_thread( toku_pin_ftnode_off_client_thread(
...@@ -358,7 +358,7 @@ doit (bool after_split) { ...@@ -358,7 +358,7 @@ doit (bool after_split) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 1); assert(BLB_DATA(node, 0)->dmt_size() == 1);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
} }
else { else {
...@@ -375,7 +375,7 @@ doit (bool after_split) { ...@@ -375,7 +375,7 @@ doit (bool after_split) {
assert(node->height == 0); assert(node->height == 0);
assert(!node->dirty); assert(!node->dirty);
assert(node->n_children == 1); assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->omt_size() == 2); assert(BLB_DATA(node, 0)->dmt_size() == 2);
toku_unpin_ftnode_off_client_thread(c_ft->ft, node); toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
} }
......
...@@ -122,7 +122,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -122,7 +122,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -112,7 +112,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -112,7 +112,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -112,7 +112,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -112,7 +112,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -114,7 +114,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -114,7 +114,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
toku_fill_dbt(&theval, val, vallen); toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) ...@@ -111,7 +111,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
DBT theval; toku_fill_dbt(&theval, val, vallen); DBT theval; toku_fill_dbt(&theval, val, vallen);
// get an index that we can use to create a new leaf entry // get an index that we can use to create a new leaf entry
uint32_t idx = BLB_DATA(leafnode, 0)->omt_size(); uint32_t idx = BLB_DATA(leafnode, 0)->dmt_size();
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
......
...@@ -315,9 +315,9 @@ dump_node (int f, BLOCKNUM blocknum, FT h) { ...@@ -315,9 +315,9 @@ dump_node (int f, BLOCKNUM blocknum, FT h) {
} }
} else { } else {
printf(" n_bytes_in_buffer= %" PRIu64 "", BLB_DATA(n, i)->get_disk_size()); printf(" n_bytes_in_buffer= %" PRIu64 "", BLB_DATA(n, i)->get_disk_size());
printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->omt_size()); printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->dmt_size());
if (dump_data) { if (dump_data) {
BLB_DATA(n, i)->omt_iterate<void, print_le>(NULL); BLB_DATA(n, i)->dmt_iterate<void, print_le>(NULL);
} }
} }
} }
......
...@@ -96,8 +96,8 @@ PATENT RIGHTS GRANT: ...@@ -96,8 +96,8 @@ PATENT RIGHTS GRANT:
namespace toku { namespace toku {
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::create(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create(void) {
toku_mempool_zero(&this->mp); toku_mempool_zero(&this->mp);
this->values_same_size = true; this->values_same_size = true;
this->value_length = 0; this->value_length = 0;
...@@ -115,8 +115,8 @@ void dmt<dmtdata_t, dmtdataout_t>::create(void) { ...@@ -115,8 +115,8 @@ void dmt<dmtdata_t, dmtdataout_t>::create(void) {
* Also all current uses (as of Jan 12, 2014) of this function would require mallocing a new array * Also all current uses (as of Jan 12, 2014) of this function would require mallocing a new array
* in order to allow stealing. * in order to allow stealing.
*/ */
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::create_from_sorted_memory_of_fixed_size_elements( void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fixed_size_elements(
const void *mem, const void *mem,
const uint32_t numvalues, const uint32_t numvalues,
const uint32_t mem_length, const uint32_t mem_length,
...@@ -149,14 +149,14 @@ void dmt<dmtdata_t, dmtdataout_t>::create_from_sorted_memory_of_fixed_size_eleme ...@@ -149,14 +149,14 @@ void dmt<dmtdata_t, dmtdataout_t>::create_from_sorted_memory_of_fixed_size_eleme
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::clone(const dmt &src) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::clone(const dmt &src) {
*this = src; *this = src;
toku_mempool_clone(&src.mp, &this->mp); toku_mempool_clone(&src.mp, &this->mp);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::clear(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::clear(void) {
this->is_array = true; this->is_array = true;
this->d.a.num_values = 0; this->d.a.num_values = 0;
this->values_same_size = true; // Reset state this->values_same_size = true; // Reset state
...@@ -166,14 +166,14 @@ void dmt<dmtdata_t, dmtdataout_t>::clear(void) { ...@@ -166,14 +166,14 @@ void dmt<dmtdata_t, dmtdataout_t>::clear(void) {
toku_mempool_reset(&this->mp); toku_mempool_reset(&this->mp);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::destroy(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::destroy(void) {
this->clear(); this->clear();
toku_mempool_destroy(&this->mp); toku_mempool_destroy(&this->mp);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
uint32_t dmt<dmtdata_t, dmtdataout_t>::size(void) const { uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::size(void) const {
if (this->is_array) { if (this->is_array) {
return this->d.a.num_values; return this->d.a.num_values;
} else { } else {
...@@ -181,8 +181,8 @@ uint32_t dmt<dmtdata_t, dmtdataout_t>::size(void) const { ...@@ -181,8 +181,8 @@ uint32_t dmt<dmtdata_t, dmtdataout_t>::size(void) const {
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
uint32_t dmt<dmtdata_t, dmtdataout_t>::nweight(const subtree &subtree) const { uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::nweight(const subtree &subtree) const {
if (subtree.is_null()) { if (subtree.is_null()) {
return 0; return 0;
} else { } else {
...@@ -191,9 +191,9 @@ uint32_t dmt<dmtdata_t, dmtdataout_t>::nweight(const subtree &subtree) const { ...@@ -191,9 +191,9 @@ uint32_t dmt<dmtdata_t, dmtdataout_t>::nweight(const subtree &subtree) const {
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)> template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::insert(const dmtdatain_t &value, const dmtcmp_t &v, uint32_t *const idx) { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert(const dmtwriter_t &value, const dmtcmp_t &v, uint32_t *const idx) {
int r; int r;
uint32_t insert_idx; uint32_t insert_idx;
...@@ -210,11 +210,11 @@ int dmt<dmtdata_t, dmtdataout_t>::insert(const dmtdatain_t &value, const dmtcmp_ ...@@ -210,11 +210,11 @@ int dmt<dmtdata_t, dmtdataout_t>::insert(const dmtdatain_t &value, const dmtcmp_
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
int dmt<dmtdata_t, dmtdataout_t>::insert_at(const dmtdatain_t &value, const uint32_t idx) { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert_at(const dmtwriter_t &value, const uint32_t idx) {
if (idx > this->size()) { return EINVAL; } if (idx > this->size()) { return EINVAL; }
bool same_size = this->values_same_size && (this->size() == 0 || value.get_dmtdatain_t_size() == this->value_length); bool same_size = this->values_same_size && (this->size() == 0 || value.get_size() == this->value_length);
if (this->is_array) { if (this->is_array) {
if (same_size && idx == this->d.a.num_values) { if (same_size && idx == this->d.a.num_values) {
return this->insert_at_array_end<true>(value); return this->insert_at_array_end<true>(value);
...@@ -237,26 +237,26 @@ int dmt<dmtdata_t, dmtdataout_t>::insert_at(const dmtdatain_t &value, const uint ...@@ -237,26 +237,26 @@ int dmt<dmtdata_t, dmtdataout_t>::insert_at(const dmtdatain_t &value, const uint
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<bool with_resize> template<bool with_resize>
int dmt<dmtdata_t, dmtdataout_t>::insert_at_array_end(const dmtdatain_t& value_in) { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert_at_array_end(const dmtwriter_t& value_in) {
paranoid_invariant(this->is_array); paranoid_invariant(this->is_array);
paranoid_invariant(this->values_same_size); paranoid_invariant(this->values_same_size);
if (this->d.a.num_values == 0) { if (this->d.a.num_values == 0) {
this->value_length = value_in.get_dmtdatain_t_size(); this->value_length = value_in.get_size();
} }
paranoid_invariant(this->value_length == value_in.get_dmtdatain_t_size()); paranoid_invariant(this->value_length == value_in.get_size());
if (with_resize) { if (with_resize) {
this->maybe_resize_array_for_insert(); this->maybe_resize_array_for_insert();
} }
dmtdata_t *dest = this->alloc_array_value_end(); dmtdata_t *dest = this->alloc_array_value_end();
value_in.write_dmtdata_t_to(dest); value_in.write_to(dest);
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::alloc_array_value_end(void) { dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_array_value_end(void) {
paranoid_invariant(this->is_array); paranoid_invariant(this->is_array);
paranoid_invariant(this->values_same_size); paranoid_invariant(this->values_same_size);
this->d.a.num_values++; this->d.a.num_values++;
...@@ -269,8 +269,8 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::alloc_array_value_end(void) { ...@@ -269,8 +269,8 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::alloc_array_value_end(void) {
return n; return n;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::get_array_value(const uint32_t idx) const { dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_array_value(const uint32_t idx) const {
paranoid_invariant(this->is_array); paranoid_invariant(this->is_array);
paranoid_invariant(this->values_same_size); paranoid_invariant(this->values_same_size);
...@@ -278,16 +278,16 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::get_array_value(const uint32_t idx) co ...@@ -278,16 +278,16 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::get_array_value(const uint32_t idx) co
return get_array_value_internal(&this->mp, idx); return get_array_value_internal(&this->mp, idx);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
dmtdata_t * dmt<dmtdata_t, dmtdataout_t>::get_array_value_internal(const struct mempool *mempool, const uint32_t idx) const { dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_array_value_internal(const struct mempool *mempool, const uint32_t idx) const {
void* ptr = toku_mempool_get_pointer_from_base_and_offset(mempool, idx * align(this->value_length)); void* ptr = toku_mempool_get_pointer_from_base_and_offset(mempool, idx * align(this->value_length));
dmtdata_t *CAST_FROM_VOIDP(value, ptr); dmtdata_t *CAST_FROM_VOIDP(value, ptr);
return value; return value;
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change and post-change text, and the
// embedded "@@ -311,20 +311,20 @@" hunk marker stands for omitted unchanged
// lines of maybe_resize_array_for_insert(). The omitted interior cannot be
// reconstructed from this view, so the text is left byte-identical.
//TODO(leif) write microbenchmarks to compare growth factor. Note: growth factor here is actually 2.5 because of mempool_construct //TODO(leif) write microbenchmarks to compare growth factor. Note: growth factor here is actually 2.5 because of mempool_construct
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::maybe_resize_array_for_insert(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_array_for_insert(void) {
bool space_available = toku_mempool_get_free_space(&this->mp) >= align(this->value_length); bool space_available = toku_mempool_get_free_space(&this->mp) >= align(this->value_length);
if (!space_available) { if (!space_available) {
...@@ -311,20 +311,20 @@ void dmt<dmtdata_t, dmtdataout_t>::maybe_resize_array_for_insert(void) { ...@@ -311,20 +311,20 @@ void dmt<dmtdata_t, dmtdataout_t>::maybe_resize_array_for_insert(void) {
} }
} }
// Round x up to the next multiple of ALIGNMENT (the mempool allocation
// granularity used for both array slots and tree nodes).
// Defect fixed: span was a diff-rendering artifact (pre- and post-change text
// fused on each line); reconstructed as the post-change three-parameter
// template version.
template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::align(const uint32_t x) const {
    return roundup_to_multiple(ALIGNMENT, x);
}
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::prepare_for_serialize(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::prepare_for_serialize(void) {
if (!this->is_array) { if (!this->is_array) {
this->convert_from_tree_to_array(); this->convert_from_tree_to_array();
} }
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change text (dmtdatain_t functor) with the
// post-change text (dmtwriter_t writer), and the embedded "@@ ... @@" hunk
// markers stand for omitted unchanged lines of convert_from_tree_to_array(),
// convert_from_array_to_tree(), and delete_at(). The omitted interiors cannot
// be reconstructed from this view, so the text is left byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::convert_from_tree_to_array(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::convert_from_tree_to_array(void) {
paranoid_invariant(!this->is_array); paranoid_invariant(!this->is_array);
paranoid_invariant(this->values_same_size); paranoid_invariant(this->values_same_size);
...@@ -358,8 +358,8 @@ void dmt<dmtdata_t, dmtdataout_t>::convert_from_tree_to_array(void) { ...@@ -358,8 +358,8 @@ void dmt<dmtdata_t, dmtdataout_t>::convert_from_tree_to_array(void) {
if (malloced) toku_free(tmp_array); if (malloced) toku_free(tmp_array);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::convert_from_array_to_tree(void) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::convert_from_array_to_tree(void) {
paranoid_invariant(this->is_array); paranoid_invariant(this->is_array);
paranoid_invariant(this->values_same_size); paranoid_invariant(this->values_same_size);
...@@ -379,8 +379,8 @@ void dmt<dmtdata_t, dmtdataout_t>::convert_from_array_to_tree(void) { ...@@ -379,8 +379,8 @@ void dmt<dmtdata_t, dmtdataout_t>::convert_from_array_to_tree(void) {
toku_mempool_construct(&this->mp, mem_needed); toku_mempool_construct(&this->mp, mem_needed);
for (uint32_t i = 0; i < num_values; i++) { for (uint32_t i = 0; i < num_values; i++) {
dmtdatain_t functor(this->value_length, get_array_value_internal(&old_mp, i)); dmtwriter_t writer(this->value_length, get_array_value_internal(&old_mp, i));
tmp_array[i] = node_malloc_and_set_value(functor); tmp_array[i] = node_malloc_and_set_value(writer);
} }
this->is_array = false; this->is_array = false;
this->rebuild_subtree_from_offsets(&this->d.t.root, tmp_array, num_values); this->rebuild_subtree_from_offsets(&this->d.t.root, tmp_array, num_values);
...@@ -389,8 +389,8 @@ void dmt<dmtdata_t, dmtdataout_t>::convert_from_array_to_tree(void) { ...@@ -389,8 +389,8 @@ void dmt<dmtdata_t, dmtdataout_t>::convert_from_array_to_tree(void) {
toku_mempool_destroy(&old_mp); toku_mempool_destroy(&old_mp);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
int dmt<dmtdata_t, dmtdataout_t>::delete_at(const uint32_t idx) { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::delete_at(const uint32_t idx) {
uint32_t n = this->size(); uint32_t n = this->size();
if (idx >= n) { return EINVAL; } if (idx >= n) { return EINVAL; }
...@@ -412,17 +412,17 @@ int dmt<dmtdata_t, dmtdataout_t>::delete_at(const uint32_t idx) { ...@@ -412,17 +412,17 @@ int dmt<dmtdata_t, dmtdataout_t>::delete_at(const uint32_t idx) {
return 0; return 0;
} }
// Apply callback f to every value in index order. Thin wrapper that
// delegates to iterate_on_range over the full range [0, size()).
// f receives (value_length, value, index, iterate_extra) and iteration stops
// early if it returns nonzero; that status is propagated to the caller.
// Defect fixed: span was a diff-rendering artifact; reconstructed as the
// post-change three-parameter template version.
template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t,
         int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate(iterate_extra_t *const iterate_extra) const {
    return this->iterate_on_range<iterate_extra_t, f>(0, this->size(), iterate_extra);
}
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change text (two-parameter dmt template)
// with the post-change text (three-parameter, dmtwriter_t), and the embedded
// "@@ ... @@" hunk markers stand for omitted unchanged lines of
// iterate_on_range(), verify(), verify_internal(), iterate_ptr(), fetch(),
// find_zero(), and find(). The omitted interiors cannot be reconstructed from
// this view, so the text is left byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)> int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
int dmt<dmtdata_t, dmtdataout_t>::iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
if (right > this->size()) { return EINVAL; } if (right > this->size()) { return EINVAL; }
if (left == right) { return 0; } if (left == right) { return 0; }
if (this->is_array) { if (this->is_array) {
...@@ -431,8 +431,8 @@ int dmt<dmtdata_t, dmtdataout_t>::iterate_on_range(const uint32_t left, const ui ...@@ -431,8 +431,8 @@ int dmt<dmtdata_t, dmtdataout_t>::iterate_on_range(const uint32_t left, const ui
return this->iterate_internal<iterate_extra_t, f>(left, right, this->d.t.root, 0, iterate_extra); return this->iterate_internal<iterate_extra_t, f>(left, right, this->d.t.root, 0, iterate_extra);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::verify(void) const { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::verify(void) const {
uint32_t num_values = this->size(); uint32_t num_values = this->size();
invariant(num_values < UINT32_MAX); invariant(num_values < UINT32_MAX);
size_t pool_used = toku_mempool_get_used_space(&this->mp); size_t pool_used = toku_mempool_get_used_space(&this->mp);
...@@ -469,8 +469,8 @@ void dmt<dmtdata_t, dmtdataout_t>::verify(void) const { ...@@ -469,8 +469,8 @@ void dmt<dmtdata_t, dmtdataout_t>::verify(void) const {
} }
// Verifies all weights are internally consistent. // Verifies all weights are internally consistent.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::verify_internal(const subtree &subtree, std::vector<bool> *touched) const { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::verify_internal(const subtree &subtree, std::vector<bool> *touched) const {
if (subtree.is_null()) { if (subtree.is_null()) {
return; return;
} }
...@@ -499,10 +499,10 @@ void dmt<dmtdata_t, dmtdataout_t>::verify_internal(const subtree &subtree, std:: ...@@ -499,10 +499,10 @@ void dmt<dmtdata_t, dmtdataout_t>::verify_internal(const subtree &subtree, std::
verify_internal(node.right, touched); verify_internal(node.right, touched);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)> int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr(iterate_extra_t *const iterate_extra) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_ptr(iterate_extra_t *const iterate_extra) {
if (this->is_array) { if (this->is_array) {
this->iterate_ptr_internal_array<iterate_extra_t, f>(0, this->size(), iterate_extra); this->iterate_ptr_internal_array<iterate_extra_t, f>(0, this->size(), iterate_extra);
} else { } else {
...@@ -510,8 +510,8 @@ void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr(iterate_extra_t *const iterate_ex ...@@ -510,8 +510,8 @@ void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr(iterate_extra_t *const iterate_ex
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
int dmt<dmtdata_t, dmtdataout_t>::fetch(const uint32_t idx, uint32_t *const value_len, dmtdataout_t *const value) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fetch(const uint32_t idx, uint32_t *const value_len, dmtdataout_t *const value) const {
if (idx >= this->size()) { return EINVAL; } if (idx >= this->size()) { return EINVAL; }
if (this->is_array) { if (this->is_array) {
this->fetch_internal_array(idx, value_len, value); this->fetch_internal_array(idx, value_len, value);
...@@ -521,10 +521,10 @@ int dmt<dmtdata_t, dmtdataout_t>::fetch(const uint32_t idx, uint32_t *const valu ...@@ -521,10 +521,10 @@ int dmt<dmtdata_t, dmtdataout_t>::fetch(const uint32_t idx, uint32_t *const valu
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_zero(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_zero(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
uint32_t tmp_index; uint32_t tmp_index;
uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index; uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
int r; int r;
...@@ -537,10 +537,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_zero(const dmtcmp_t &extra, uint32_t *con ...@@ -537,10 +537,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_zero(const dmtcmp_t &extra, uint32_t *con
return r; return r;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find(const dmtcmp_t &extra, int direction, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find(const dmtcmp_t &extra, int direction, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
uint32_t tmp_index; uint32_t tmp_index;
uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index; uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
paranoid_invariant(direction != 0); paranoid_invariant(direction != 0);
...@@ -559,33 +559,33 @@ int dmt<dmtdata_t, dmtdataout_t>::find(const dmtcmp_t &extra, int direction, uin ...@@ -559,33 +559,33 @@ int dmt<dmtdata_t, dmtdataout_t>::find(const dmtcmp_t &extra, int direction, uin
} }
} }
// Total memory footprint: the dmt object itself plus the entire capacity of
// the backing mempool (capacity, not just used bytes).
// Defect fixed: span was a diff-rendering artifact; reconstructed as the
// post-change three-parameter template version.
template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
size_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::memory_size(void) {
    return (sizeof *this) + toku_mempool_get_size(&this->mp);
}
// Resolve a non-null subtree reference to its node; convenience overload that
// forwards to the node_offset overload.
// Defect fixed: span was a diff-rendering artifact; reconstructed as the
// post-change three-parameter template version.
template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
dmt_node_templated<dmtdata_t> & dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_node(const subtree &subtree) const {
    paranoid_invariant(!subtree.is_null());
    return get_node(subtree.get_offset());
}
// Resolve a node_offset (offset from the mempool base) to a node reference.
// The reference is valid only until the mempool is reallocated.
// Defect fixed: span was a diff-rendering artifact; reconstructed as the
// post-change three-parameter template version.
template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
dmt_node_templated<dmtdata_t> & dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_node(const node_offset offset) const {
    void* ptr = toku_mempool_get_pointer_from_base_and_offset(&this->mp, offset);
    dmt_node *CAST_FROM_VOIDP(node, ptr);
    return *node;
}
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::node_set_value(dmt_node * n, const dmtdatain_t &value) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_set_value(dmt_node * n, const dmtwriter_t &value) {
n->value_length = value.get_dmtdatain_t_size(); n->value_length = value.get_size();
value.write_dmtdata_t_to(&n->value); value.write_to(&n->value);
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change text (dmtdatain_t / get_dmtdatain_t_size)
// with the post-change text (dmtwriter_t / get_size), and the embedded
// "@@ -596,22 +596,22 @@" hunk marker stands for omitted unchanged lines of
// node_malloc_and_set_value(). The omitted interior cannot be reconstructed
// from this view, so the text is left byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
node_offset dmt<dmtdata_t, dmtdataout_t>::node_malloc_and_set_value(const dmtdatain_t &value) { node_offset dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_malloc_and_set_value(const dmtwriter_t &value) {
size_t val_size = value.get_dmtdatain_t_size(); size_t val_size = value.get_size();
size_t size_to_alloc = __builtin_offsetof(dmt_node, value) + val_size; size_t size_to_alloc = __builtin_offsetof(dmt_node, value) + val_size;
size_to_alloc = align(size_to_alloc); size_to_alloc = align(size_to_alloc);
void* np = toku_mempool_malloc(&this->mp, size_to_alloc, 1); void* np = toku_mempool_malloc(&this->mp, size_to_alloc, 1);
...@@ -596,22 +596,22 @@ node_offset dmt<dmtdata_t, dmtdataout_t>::node_malloc_and_set_value(const dmtdat ...@@ -596,22 +596,22 @@ node_offset dmt<dmtdata_t, dmtdataout_t>::node_malloc_and_set_value(const dmtdat
return toku_mempool_get_offset_from_pointer_and_base(&this->mp, np); return toku_mempool_get_offset_from_pointer_and_base(&this->mp, np);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::node_free(const subtree &st) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_free(const subtree &st) {
dmt_node &n = get_node(st); dmt_node &n = get_node(st);
size_t size_to_free = __builtin_offsetof(dmt_node, value) + n.value_length; size_t size_to_free = __builtin_offsetof(dmt_node, value) + n.value_length;
size_to_free = align(size_to_free); size_to_free = align(size_to_free);
toku_mempool_mfree(&this->mp, &n, size_to_free); toku_mempool_mfree(&this->mp, &n, size_to_free);
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change text (dmtdatain_t) with the
// post-change text (dmtwriter_t), and the embedded "@@ ... @@" hunk markers
// stand for omitted unchanged lines of maybe_resize_tree(),
// will_need_rebalance(), insert_internal(), and delete_internal(). The
// omitted interiors cannot be reconstructed from this view, so the text is
// left byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::maybe_resize_tree(const dmtdatain_t * value) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_tree(const dmtwriter_t * value) {
const ssize_t curr_capacity = toku_mempool_get_size(&this->mp); const ssize_t curr_capacity = toku_mempool_get_size(&this->mp);
const ssize_t curr_free = toku_mempool_get_free_space(&this->mp); const ssize_t curr_free = toku_mempool_get_free_space(&this->mp);
const ssize_t curr_used = toku_mempool_get_used_space(&this->mp); const ssize_t curr_used = toku_mempool_get_used_space(&this->mp);
ssize_t add_size = 0; ssize_t add_size = 0;
if (value) { if (value) {
add_size = __builtin_offsetof(dmt_node, value) + value->get_dmtdatain_t_size(); add_size = __builtin_offsetof(dmt_node, value) + value->get_size();
add_size = align(add_size); add_size = align(add_size);
} }
...@@ -662,8 +662,8 @@ void dmt<dmtdata_t, dmtdataout_t>::maybe_resize_tree(const dmtdatain_t * value) ...@@ -662,8 +662,8 @@ void dmt<dmtdata_t, dmtdataout_t>::maybe_resize_tree(const dmtdatain_t * value)
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
bool dmt<dmtdata_t, dmtdataout_t>::will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const { bool dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const {
if (subtree.is_null()) { return false; } if (subtree.is_null()) { return false; }
const dmt_node &n = get_node(subtree); const dmt_node &n = get_node(subtree);
// one of the 1's is for the root. // one of the 1's is for the root.
...@@ -675,8 +675,8 @@ bool dmt<dmtdata_t, dmtdataout_t>::will_need_rebalance(const subtree &subtree, c ...@@ -675,8 +675,8 @@ bool dmt<dmtdata_t, dmtdataout_t>::will_need_rebalance(const subtree &subtree, c
(1+weight_right < (1+1+weight_left)/2)); (1+weight_right < (1+1+weight_left)/2));
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::insert_internal(subtree *const subtreep, const dmtdatain_t &value, const uint32_t idx, subtree **const rebalance_subtree) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::insert_internal(subtree *const subtreep, const dmtwriter_t &value, const uint32_t idx, subtree **const rebalance_subtree) {
if (subtreep->is_null()) { if (subtreep->is_null()) {
paranoid_invariant_zero(idx); paranoid_invariant_zero(idx);
const node_offset newoffset = this->node_malloc_and_set_value(value); const node_offset newoffset = this->node_malloc_and_set_value(value);
...@@ -703,8 +703,8 @@ void dmt<dmtdata_t, dmtdataout_t>::insert_internal(subtree *const subtreep, cons ...@@ -703,8 +703,8 @@ void dmt<dmtdata_t, dmtdataout_t>::insert_internal(subtree *const subtreep, cons
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::delete_internal(subtree *const subtreep, const uint32_t idx, subtree *const subtree_replace, subtree **const rebalance_subtree) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::delete_internal(subtree *const subtreep, const uint32_t idx, subtree *const subtree_replace, subtree **const rebalance_subtree) {
paranoid_invariant_notnull(subtreep); paranoid_invariant_notnull(subtreep);
paranoid_invariant_notnull(rebalance_subtree); paranoid_invariant_notnull(rebalance_subtree);
paranoid_invariant(!subtreep->is_null()); paranoid_invariant(!subtreep->is_null());
...@@ -766,10 +766,10 @@ void dmt<dmtdata_t, dmtdataout_t>::delete_internal(subtree *const subtreep, cons ...@@ -766,10 +766,10 @@ void dmt<dmtdata_t, dmtdataout_t>::delete_internal(subtree *const subtreep, cons
} }
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change and post-change template text, and
// the embedded "@@ ... @@" hunk markers stand for omitted unchanged lines of
// iterate_internal_array(), iterate_ptr_internal(),
// iterate_ptr_internal_array(), and iterate_internal(). The omitted interiors
// cannot be reconstructed from this view, so the text is left byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)> int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
int dmt<dmtdata_t, dmtdataout_t>::iterate_internal_array(const uint32_t left, const uint32_t right, int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_internal_array(const uint32_t left, const uint32_t right,
iterate_extra_t *const iterate_extra) const { iterate_extra_t *const iterate_extra) const {
int r; int r;
for (uint32_t i = left; i < right; ++i) { for (uint32_t i = left; i < right; ++i) {
...@@ -781,10 +781,10 @@ int dmt<dmtdata_t, dmtdataout_t>::iterate_internal_array(const uint32_t left, co ...@@ -781,10 +781,10 @@ int dmt<dmtdata_t, dmtdataout_t>::iterate_internal_array(const uint32_t left, co
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)> int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr_internal(const uint32_t left, const uint32_t right, void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_ptr_internal(const uint32_t left, const uint32_t right,
const subtree &subtree, const uint32_t idx, const subtree &subtree, const uint32_t idx,
iterate_extra_t *const iterate_extra) { iterate_extra_t *const iterate_extra) {
if (!subtree.is_null()) { if (!subtree.is_null()) {
...@@ -803,10 +803,10 @@ void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr_internal(const uint32_t left, con ...@@ -803,10 +803,10 @@ void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr_internal(const uint32_t left, con
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)> int (*f)(const uint32_t, dmtdata_t *, const uint32_t, iterate_extra_t *const)>
void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr_internal_array(const uint32_t left, const uint32_t right, void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_ptr_internal_array(const uint32_t left, const uint32_t right,
iterate_extra_t *const iterate_extra) { iterate_extra_t *const iterate_extra) {
for (uint32_t i = left; i < right; ++i) { for (uint32_t i = left; i < right; ++i) {
int r = f(this->value_length, get_array_value(i), i, iterate_extra); int r = f(this->value_length, get_array_value(i), i, iterate_extra);
...@@ -814,10 +814,10 @@ void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr_internal_array(const uint32_t lef ...@@ -814,10 +814,10 @@ void dmt<dmtdata_t, dmtdataout_t>::iterate_ptr_internal_array(const uint32_t lef
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)> int (*f)(const uint32_t, const dmtdata_t &, const uint32_t, iterate_extra_t *const)>
int dmt<dmtdata_t, dmtdataout_t>::iterate_internal(const uint32_t left, const uint32_t right, int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::iterate_internal(const uint32_t left, const uint32_t right,
const subtree &subtree, const uint32_t idx, const subtree &subtree, const uint32_t idx,
iterate_extra_t *const iterate_extra) const { iterate_extra_t *const iterate_extra) const {
if (subtree.is_null()) { return 0; } if (subtree.is_null()) { return 0; }
...@@ -838,13 +838,13 @@ int dmt<dmtdata_t, dmtdataout_t>::iterate_internal(const uint32_t left, const ui ...@@ -838,13 +838,13 @@ int dmt<dmtdata_t, dmtdataout_t>::iterate_internal(const uint32_t left, const ui
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::fetch_internal_array(const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fetch_internal_array(const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const {
copyout(value_len, value, this->value_length, get_array_value(i)); copyout(value_len, value, this->value_length, get_array_value(i));
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change and post-change template text, and
// the embedded "@@ ... @@" hunk markers stand for omitted unchanged lines of
// fetch_internal(), fill_array_with_subtree_offsets(), and
// rebuild_subtree_from_offsets(). The omitted interiors cannot be
// reconstructed from this view, so the text is left byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::fetch_internal(const subtree &subtree, const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fetch_internal(const subtree &subtree, const uint32_t i, uint32_t *const value_len, dmtdataout_t *const value) const {
dmt_node &n = get_node(subtree); dmt_node &n = get_node(subtree);
const uint32_t leftweight = this->nweight(n.left); const uint32_t leftweight = this->nweight(n.left);
if (i < leftweight) { if (i < leftweight) {
...@@ -856,8 +856,8 @@ void dmt<dmtdata_t, dmtdataout_t>::fetch_internal(const subtree &subtree, const ...@@ -856,8 +856,8 @@ void dmt<dmtdata_t, dmtdataout_t>::fetch_internal(const subtree &subtree, const
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::fill_array_with_subtree_offsets(node_offset *const array, const subtree &subtree) const { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::fill_array_with_subtree_offsets(node_offset *const array, const subtree &subtree) const {
if (!subtree.is_null()) { if (!subtree.is_null()) {
const dmt_node &tree = get_node(subtree); const dmt_node &tree = get_node(subtree);
this->fill_array_with_subtree_offsets(&array[0], tree.left); this->fill_array_with_subtree_offsets(&array[0], tree.left);
...@@ -866,8 +866,8 @@ void dmt<dmtdata_t, dmtdataout_t>::fill_array_with_subtree_offsets(node_offset * ...@@ -866,8 +866,8 @@ void dmt<dmtdata_t, dmtdataout_t>::fill_array_with_subtree_offsets(node_offset *
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::rebuild_subtree_from_offsets(subtree *const subtree, const node_offset *const offsets, const uint32_t numvalues) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::rebuild_subtree_from_offsets(subtree *const subtree, const node_offset *const offsets, const uint32_t numvalues) {
if (numvalues==0) { if (numvalues==0) {
subtree->set_to_null(); subtree->set_to_null();
} else { } else {
...@@ -882,8 +882,8 @@ void dmt<dmtdata_t, dmtdataout_t>::rebuild_subtree_from_offsets(subtree *const s ...@@ -882,8 +882,8 @@ void dmt<dmtdata_t, dmtdataout_t>::rebuild_subtree_from_offsets(subtree *const s
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change and post-change template text, and
// the embedded "@@ ... @@" hunk markers stand for omitted unchanged lines of
// alloc_temp_node_offsets() and rebalance(). The omitted interiors cannot be
// reconstructed from this view, so the text is left byte-identical.
//TODO(leif): Note that this can mess with our memory_footprint calculation (we may touch past what is marked as 'used' in the mempool) //TODO(leif): Note that this can mess with our memory_footprint calculation (we may touch past what is marked as 'used' in the mempool)
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
node_offset* dmt<dmtdata_t, dmtdataout_t>::alloc_temp_node_offsets(uint32_t num_offsets) { node_offset* dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_temp_node_offsets(uint32_t num_offsets) {
size_t mem_needed = num_offsets * sizeof(node_offset); size_t mem_needed = num_offsets * sizeof(node_offset);
size_t mem_free; size_t mem_free;
mem_free = toku_mempool_get_free_space(&this->mp); mem_free = toku_mempool_get_free_space(&this->mp);
...@@ -894,8 +894,8 @@ node_offset* dmt<dmtdata_t, dmtdataout_t>::alloc_temp_node_offsets(uint32_t num_ ...@@ -894,8 +894,8 @@ node_offset* dmt<dmtdata_t, dmtdataout_t>::alloc_temp_node_offsets(uint32_t num_
return nullptr; return nullptr;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::rebalance(subtree *const subtree) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::rebalance(subtree *const subtree) {
paranoid_invariant(!subtree->is_null()); paranoid_invariant(!subtree->is_null());
// There is a possible "optimization" here: // There is a possible "optimization" here:
...@@ -920,8 +920,8 @@ void dmt<dmtdata_t, dmtdataout_t>::rebalance(subtree *const subtree) { ...@@ -920,8 +920,8 @@ void dmt<dmtdata_t, dmtdataout_t>::rebalance(subtree *const subtree) {
if (malloced) toku_free(tmp_array); if (malloced) toku_free(tmp_array);
} }
// NOTE(review): The lines below are residue of a side-by-side diff rendering:
// each line concatenates the pre-change and post-change template text, and
// the embedded "@@ ... @@" hunk markers stand for omitted unchanged lines of
// the four copyout() overloads and find_internal_zero_array(). The omitted
// interiors cannot be reconstructed from this view, so the text is left
// byte-identical.
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t *const out, const dmt_node *const n) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t *const out, const dmt_node *const n) {
if (outlen) { if (outlen) {
*outlen = n->value_length; *outlen = n->value_length;
} }
...@@ -930,8 +930,8 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t *co ...@@ -930,8 +930,8 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t *co
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t **const out, dmt_node *const n) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t **const out, dmt_node *const n) {
if (outlen) { if (outlen) {
*outlen = n->value_length; *outlen = n->value_length;
} }
...@@ -940,8 +940,8 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t **c ...@@ -940,8 +940,8 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t **c
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t *const out, const uint32_t len, const dmtdata_t *const stored_value_ptr) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t *const out, const uint32_t len, const dmtdata_t *const stored_value_ptr) {
if (outlen) { if (outlen) {
*outlen = len; *outlen = len;
} }
...@@ -950,8 +950,8 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t *co ...@@ -950,8 +950,8 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t *co
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t **const out, const uint32_t len, dmtdata_t *const stored_value_ptr) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::copyout(uint32_t *const outlen, dmtdata_t **const out, const uint32_t len, dmtdata_t *const stored_value_ptr) {
if (outlen) { if (outlen) {
*outlen = len; *outlen = len;
} }
...@@ -960,10 +960,10 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t **c ...@@ -960,10 +960,10 @@ void dmt<dmtdata_t, dmtdataout_t>::copyout(uint32_t *const outlen, dmtdata_t **c
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_internal_zero_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_zero_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
paranoid_invariant_notnull(idxp); paranoid_invariant_notnull(idxp);
uint32_t min = 0; uint32_t min = 0;
uint32_t limit = this->d.a.num_values; uint32_t limit = this->d.a.num_values;
...@@ -996,10 +996,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_zero_array(const dmtcmp_t &extra ...@@ -996,10 +996,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_zero_array(const dmtcmp_t &extra
return DB_NOTFOUND; return DB_NOTFOUND;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_internal_zero(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_zero(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
paranoid_invariant_notnull(idxp); paranoid_invariant_notnull(idxp);
if (subtree.is_null()) { if (subtree.is_null()) {
*idxp = 0; *idxp = 0;
...@@ -1024,10 +1024,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_zero(const subtree &subtree, con ...@@ -1024,10 +1024,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_zero(const subtree &subtree, con
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_internal_plus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_plus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
paranoid_invariant_notnull(idxp); paranoid_invariant_notnull(idxp);
uint32_t min = 0; uint32_t min = 0;
uint32_t limit = this->d.a.num_values; uint32_t limit = this->d.a.num_values;
...@@ -1049,10 +1049,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_plus_array(const dmtcmp_t &extra ...@@ -1049,10 +1049,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_plus_array(const dmtcmp_t &extra
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_internal_plus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_plus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
paranoid_invariant_notnull(idxp); paranoid_invariant_notnull(idxp);
if (subtree.is_null()) { if (subtree.is_null()) {
return DB_NOTFOUND; return DB_NOTFOUND;
...@@ -1076,10 +1076,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_plus(const subtree &subtree, con ...@@ -1076,10 +1076,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_plus(const subtree &subtree, con
return r; return r;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_internal_minus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_minus_array(const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
paranoid_invariant_notnull(idxp); paranoid_invariant_notnull(idxp);
uint32_t min = 0; uint32_t min = 0;
uint32_t limit = this->d.a.num_values; uint32_t limit = this->d.a.num_values;
...@@ -1101,10 +1101,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_minus_array(const dmtcmp_t &extr ...@@ -1101,10 +1101,10 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_minus_array(const dmtcmp_t &extr
return 0; return 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
template<typename dmtcmp_t, template<typename dmtcmp_t,
int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)> int (*h)(const uint32_t, const dmtdata_t &, const dmtcmp_t &)>
int dmt<dmtdata_t, dmtdataout_t>::find_internal_minus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const { int dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::find_internal_minus(const subtree &subtree, const dmtcmp_t &extra, uint32_t *const value_len, dmtdataout_t *const value, uint32_t *const idxp) const {
paranoid_invariant_notnull(idxp); paranoid_invariant_notnull(idxp);
if (subtree.is_null()) { if (subtree.is_null()) {
return DB_NOTFOUND; return DB_NOTFOUND;
...@@ -1126,23 +1126,23 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_minus(const subtree &subtree, co ...@@ -1126,23 +1126,23 @@ int dmt<dmtdata_t, dmtdataout_t>::find_internal_minus(const subtree &subtree, co
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
uint32_t dmt<dmtdata_t, dmtdataout_t>::get_fixed_length(void) const { uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_fixed_length(void) const {
return this->values_same_size ? this->value_length : 0; return this->values_same_size ? this->value_length : 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
uint32_t dmt<dmtdata_t, dmtdataout_t>::get_fixed_length_alignment_overhead(void) const { uint32_t dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::get_fixed_length_alignment_overhead(void) const {
return this->values_same_size ? align(this->value_length) - this->value_length : 0; return this->values_same_size ? align(this->value_length) - this->value_length : 0;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
bool dmt<dmtdata_t, dmtdataout_t>::value_length_is_fixed(void) const { bool dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::value_length_is_fixed(void) const {
return this->values_same_size; return this->values_same_size;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::serialize_values(uint32_t expected_unpadded_memory, struct wbuf *wb) const { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::serialize_values(uint32_t expected_unpadded_memory, struct wbuf *wb) const {
invariant(this->is_array); invariant(this->is_array);
invariant(this->values_same_size); invariant(this->values_same_size);
const uint8_t pad_bytes = get_fixed_length_alignment_overhead(); const uint8_t pad_bytes = get_fixed_length_alignment_overhead();
...@@ -1166,8 +1166,8 @@ void dmt<dmtdata_t, dmtdataout_t>::serialize_values(uint32_t expected_unpadded_m ...@@ -1166,8 +1166,8 @@ void dmt<dmtdata_t, dmtdataout_t>::serialize_values(uint32_t expected_unpadded_m
} }
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::builder::create(uint32_t _max_values, uint32_t _max_value_bytes) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::create(uint32_t _max_values, uint32_t _max_value_bytes) {
this->max_values = _max_values; this->max_values = _max_values;
this->max_value_bytes = _max_value_bytes; this->max_value_bytes = _max_value_bytes;
this->temp.create(); this->temp.create();
...@@ -1180,11 +1180,11 @@ void dmt<dmtdata_t, dmtdataout_t>::builder::create(uint32_t _max_values, uint32_ ...@@ -1180,11 +1180,11 @@ void dmt<dmtdata_t, dmtdataout_t>::builder::create(uint32_t _max_values, uint32_
toku_mempool_construct(&this->temp.mp, initial_space); // Adds 25% toku_mempool_construct(&this->temp.mp, initial_space); // Adds 25%
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::builder::append(const dmtdatain_t &value) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::append(const dmtwriter_t &value) {
paranoid_invariant(this->temp_valid); paranoid_invariant(this->temp_valid);
//NOTE: Always use d.a.num_values for size because we have not yet created root. //NOTE: Always use d.a.num_values for size because we have not yet created root.
if (this->temp.values_same_size && (this->temp.d.a.num_values == 0 || value.get_dmtdatain_t_size() == this->temp.value_length)) { if (this->temp.values_same_size && (this->temp.d.a.num_values == 0 || value.get_size() == this->temp.value_length)) {
this->temp.insert_at_array_end<false>(value); this->temp.insert_at_array_end<false>(value);
return; return;
} }
...@@ -1201,8 +1201,8 @@ void dmt<dmtdata_t, dmtdataout_t>::builder::append(const dmtdatain_t &value) { ...@@ -1201,8 +1201,8 @@ void dmt<dmtdata_t, dmtdataout_t>::builder::append(const dmtdatain_t &value) {
// Copy over and get node_offsets // Copy over and get node_offsets
for (uint32_t i = 0; i < num_values; i++) { for (uint32_t i = 0; i < num_values; i++) {
dmtdatain_t functor(this->temp.value_length, this->temp.get_array_value_internal(&old_mp, i)); dmtwriter_t writer(this->temp.value_length, this->temp.get_array_value_internal(&old_mp, i));
this->sorted_node_offsets[i] = this->temp.node_malloc_and_set_value(functor); this->sorted_node_offsets[i] = this->temp.node_malloc_and_set_value(writer);
} }
this->temp.is_array = false; this->temp.is_array = false;
this->temp.values_same_size = false; this->temp.values_same_size = false;
...@@ -1213,14 +1213,14 @@ void dmt<dmtdata_t, dmtdataout_t>::builder::append(const dmtdatain_t &value) { ...@@ -1213,14 +1213,14 @@ void dmt<dmtdata_t, dmtdataout_t>::builder::append(const dmtdatain_t &value) {
this->sorted_node_offsets[this->temp.d.a.num_values++] = this->temp.node_malloc_and_set_value(value); this->sorted_node_offsets[this->temp.d.a.num_values++] = this->temp.node_malloc_and_set_value(value);
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
bool dmt<dmtdata_t, dmtdataout_t>::builder::value_length_is_fixed(void) { bool dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::value_length_is_fixed(void) {
paranoid_invariant(this->temp_valid); paranoid_invariant(this->temp_valid);
return this->temp.values_same_size; return this->temp.values_same_size;
} }
template<typename dmtdata_t, typename dmtdataout_t> template<typename dmtdata_t, typename dmtdataout_t, typename dmtwriter_t>
void dmt<dmtdata_t, dmtdataout_t>::builder::build(dmt<dmtdata_t, dmtdataout_t> *dest) { void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::build(dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> *dest) {
invariant(this->temp_valid); invariant(this->temp_valid);
//NOTE: Always use d.a.num_values for size because we have not yet created root. //NOTE: Always use d.a.num_values for size because we have not yet created root.
invariant(this->temp.d.a.num_values <= this->max_values); invariant(this->temp.d.a.num_values <= this->max_values);
......
...@@ -197,43 +197,40 @@ class dmt_node_templated { ...@@ -197,43 +197,40 @@ class dmt_node_templated {
using namespace toku::dmt_internal; using namespace toku::dmt_internal;
// Each data type used in a dmt requires a dmt_functor (allows you to insert/etc with dynamic sized types). // Each data type used in a dmt requires a dmt_writer class (allows you to insert/etc with dynamic sized types).
// There is no default implementation. // There is no default implementation.
template<typename dmtdata_t> // A dmtwriter instance handles reading/writing 'dmtdata_t's to/from the dmt.
class dmt_functor { // The class must implement the following functions:
// Ensures that if you forget to use partial specialization this compile error will remind you to use it. // The size required in a dmt for the dmtdata_t represented:
// We would use static_assert(false, ...) here except that it would cause a compile error even if dmt_functor<> // size_t get_size(void) const;
// We instead use an expression that evaluates to false that the compiler won't evaluate unless dmt_functor<> is used. // Write the dmtdata_t to memory owned by a dmt:
static_assert(!std::is_same<dmtdata_t, dmtdata_t>::value, "Cannot use default dmt_functor<>. Use partial specialization."); // void write_to(dmtdata_t *const dest) const;
// Defines the interface: // Constructor (others are allowed, but this one is required)
static size_t get_dmtdata_t_size(const dmtdata_t &) { return 0; } // dmtwriter(const uint32_t dmtdata_t_len, dmtdata_t *const src)
size_t get_dmtdatain_t_size(void) { return 0; }
void write_dmtdata_t_to(dmtdata_t *const dest) {}
};
template<typename dmtdata_t, template<typename dmtdata_t,
typename dmtdataout_t=dmtdata_t typename dmtdataout_t,
typename dmtwriter_t
> >
class dmt { class dmt {
private: private:
typedef dmt_node_templated<dmtdata_t> dmt_node; typedef dmt_node_templated<dmtdata_t> dmt_node;
typedef dmt_functor<dmtdata_t> dmtdatain_t;
public: public:
static const uint8_t ALIGNMENT = 4; static const uint8_t ALIGNMENT = 4;
class builder { class builder {
public: public:
void append(const dmtdatain_t &value); void append(const dmtwriter_t &value);
void create(uint32_t n_values, uint32_t n_value_bytes); void create(uint32_t n_values, uint32_t n_value_bytes);
bool value_length_is_fixed(void); bool value_length_is_fixed(void);
void build(dmt<dmtdata_t, dmtdataout_t> *dest); void build(dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> *dest);
private: private:
uint32_t max_values; uint32_t max_values;
uint32_t max_value_bytes; uint32_t max_value_bytes;
node_offset *sorted_node_offsets; node_offset *sorted_node_offsets;
bool temp_valid; bool temp_valid;
dmt<dmtdata_t, dmtdataout_t> temp; dmt<dmtdata_t, dmtdataout_t, dmtwriter_t> temp;
}; };
/** /**
...@@ -306,7 +303,7 @@ class dmt { ...@@ -306,7 +303,7 @@ class dmt {
* Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now. * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
*/ */
template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)> template<typename dmtcmp_t, int (*h)(const uint32_t size, const dmtdata_t &, const dmtcmp_t &)>
int insert(const dmtdatain_t &value, const dmtcmp_t &v, uint32_t *const idx); int insert(const dmtwriter_t &value, const dmtcmp_t &v, uint32_t *const idx);
/** /**
* Effect: Increases indexes of all items at slot >= idx by 1. * Effect: Increases indexes of all items at slot >= idx by 1.
...@@ -318,7 +315,7 @@ class dmt { ...@@ -318,7 +315,7 @@ class dmt {
* Performance: time=O(\log N) amortized time. * Performance: time=O(\log N) amortized time.
* Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now. * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
*/ */
int insert_at(const dmtdatain_t &value, const uint32_t idx); int insert_at(const dmtwriter_t &value, const uint32_t idx);
/** /**
* Effect: Delete the item in slot idx. * Effect: Delete the item in slot idx.
...@@ -557,9 +554,9 @@ class dmt { ...@@ -557,9 +554,9 @@ class dmt {
uint32_t nweight(const subtree &subtree) const; uint32_t nweight(const subtree &subtree) const;
node_offset node_malloc_and_set_value(const dmtdatain_t &value); node_offset node_malloc_and_set_value(const dmtwriter_t &value);
void node_set_value(dmt_node *n, const dmtdatain_t &value); void node_set_value(dmt_node *n, const dmtwriter_t &value);
void node_free(const subtree &st); void node_free(const subtree &st);
...@@ -567,15 +564,15 @@ class dmt { ...@@ -567,15 +564,15 @@ class dmt {
void convert_to_tree(void); void convert_to_tree(void);
void maybe_resize_tree(const dmtdatain_t * value); void maybe_resize_tree(const dmtwriter_t * value);
bool will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const; bool will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const;
__attribute__((nonnull)) __attribute__((nonnull))
void insert_internal(subtree *const subtreep, const dmtdatain_t &value, const uint32_t idx, subtree **const rebalance_subtree); void insert_internal(subtree *const subtreep, const dmtwriter_t &value, const uint32_t idx, subtree **const rebalance_subtree);
template<bool with_resize> template<bool with_resize>
int insert_at_array_end(const dmtdatain_t& value_in); int insert_at_array_end(const dmtwriter_t& value_in);
dmtdata_t * alloc_array_value_end(void); dmtdata_t * alloc_array_value_end(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment