Commit 5c9a1a4c authored by Yoni Fogel

Refs Tokutek/ft-index#46: don't (malloc|alloca)+memcpy before le_pack.

Instead, just keep the old mempool buffer around a little longer (as before the refactoring). This should
reduce large mallocs and remove a memcpy.
parent 7a2e9511
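
The new calling convention, visible throughout the hunks below: callers pass a void** maybe_free out-parameter and free whatever comes back once they are done with anything that may still live in the displaced buffer. A minimal sketch of such a caller, assuming the bn_data API in this diff (example_insert and src_val are illustrative names, not part of the commit):

// Sketch only: mirrors the le_add_to_bn pattern used by the test hunks below.
static void example_insert(bn_data *bn, uint32_t idx,
                           const char *key, uint32_t keylen,
                           const char *src_val, uint32_t vallen) {
    LEAFENTRY le = NULL;
    uint32_t size_needed = LE_CLEAN_MEMSIZE(vallen);
    void *maybe_free = nullptr;
    // May reorganize the mempool; if it does, the displaced (old) buffer is
    // handed back through maybe_free instead of being freed inside bn_data.
    bn->get_space_for_insert(idx, key, keylen, size_needed, &le, &maybe_free);
    // Nothing here still points into the old buffer, so it can be freed now.
    if (maybe_free) {
        toku_free(maybe_free);
    }
    le->type = LE_CLEAN;
    le->u.clean.vallen = vallen;
    // (value bytes would be copied into the new leafentry here, as in the tests)
    (void) src_val;
}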
......@@ -445,15 +445,12 @@ void bn_data::get_space_for_overwrite(
     uint32_t keylen UU(),
     uint32_t old_le_size,
     uint32_t new_size,
-    LEAFENTRY* new_le_space
-    //TODO(yoni): add maybe_free return (caller will free it)
+    LEAFENTRY* new_le_space,
+    void **const maybe_free
     )
 {
-    void* maybe_free = nullptr;
-    LEAFENTRY new_le = mempool_malloc_and_update_dmt(new_size, &maybe_free);
-    if (maybe_free) {
-        toku_free(maybe_free);
-    }
+    *maybe_free = nullptr;
+    LEAFENTRY new_le = mempool_malloc_and_update_dmt(new_size, maybe_free);
     toku_mempool_mfree(&m_buffer_mempool, nullptr, old_le_size);
     klpair_struct* klp = nullptr;
     uint32_t klpair_len;
......@@ -477,17 +474,14 @@ void bn_data::get_space_for_insert(
     const void* keyp,
     uint32_t keylen,
     size_t size,
-    LEAFENTRY* new_le_space
-    //TODO(yoni): add maybe_free return (caller will free it). Also make callers (and tests) use it
+    LEAFENTRY* new_le_space,
+    void **const maybe_free
     )
 {
     add_key(keylen);
-    void* maybe_free = nullptr;
-    LEAFENTRY new_le = mempool_malloc_and_update_dmt(size, &maybe_free);
-    if (maybe_free) {
-        toku_free(maybe_free);
-    }
+    *maybe_free = nullptr;
+    LEAFENTRY new_le = mempool_malloc_and_update_dmt(size, maybe_free);
     size_t new_le_offset = toku_mempool_get_offset_from_pointer_and_base(&this->m_buffer_mempool, new_le);
     klpair_dmtwriter kl(keylen, new_le_offset, keyp);
......
......@@ -306,12 +306,14 @@ class bn_data {
     // Allocates space in the mempool to store a new leafentry.
     // This may require reorganizing the mempool and updating the dmt.
-    void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_size, uint32_t new_size, LEAFENTRY* new_le_space);
+    __attribute__((__nonnull__))
+    void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_size, uint32_t new_size, LEAFENTRY* new_le_space, void **const maybe_free);
     // Allocates space in the mempool to store a new leafentry
     // and inserts a new key into the dmt
     // This may require reorganizing the mempool and updating the dmt.
-    void get_space_for_insert(uint32_t idx, const void* keyp, uint32_t keylen, size_t size, LEAFENTRY* new_le_space);
+    __attribute__((__nonnull__))
+    void get_space_for_insert(uint32_t idx, const void* keyp, uint32_t keylen, size_t size, LEAFENTRY* new_le_space, void **const maybe_free);
     // Gets a leafentry given a klpair from this basement node.
     LEAFENTRY get_le_from_klpair(const klpair_struct *klpair) const;
......
......@@ -2061,13 +2061,18 @@ deserialize_and_upgrade_leaf_node(FTNODE node,
         assert_zero(r);
         // Copy the pointer value straight into the OMT
         LEAFENTRY new_le_in_bn = nullptr;
+        void *maybe_free;
         bn->data_buffer.get_space_for_insert(
             i,
             key,
             keylen,
             new_le_size,
-            &new_le_in_bn
+            &new_le_in_bn,
+            &maybe_free
             );
+        if (maybe_free) {
+            toku_free(maybe_free);
+        }
         memcpy(new_le_in_bn, new_le, new_le_size);
         toku_free(new_le);
     }
......
......@@ -115,13 +115,18 @@ le_add_to_bn(bn_data* bn, uint32_t idx, const char *key, int keylen, const char
 {
     LEAFENTRY r = NULL;
     uint32_t size_needed = LE_CLEAN_MEMSIZE(vallen);
+    void *maybe_free = nullptr;
     bn->get_space_for_insert(
         idx,
         key,
         keylen,
         size_needed,
-        &r
+        &r,
+        &maybe_free
         );
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
     resource_assert(r);
     r->type = LE_CLEAN;
     r->u.clean.vallen = vallen;
......
......@@ -105,13 +105,18 @@ le_add_to_bn(bn_data* bn, uint32_t idx, char *key, int keylen, char *val, int va
 {
     LEAFENTRY r = NULL;
     uint32_t size_needed = LE_CLEAN_MEMSIZE(vallen);
+    void *maybe_free = nullptr;
     bn->get_space_for_insert(
         idx,
         key,
         keylen,
         size_needed,
-        &r
+        &r,
+        &maybe_free
         );
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
     resource_assert(r);
     r->type = LE_CLEAN;
     r->u.clean.vallen = vallen;
......
......@@ -102,13 +102,18 @@ le_add_to_bn(bn_data* bn, uint32_t idx, const char *key, int keysize, const cha
 {
     LEAFENTRY r = NULL;
     uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+    void *maybe_free = nullptr;
     bn->get_space_for_insert(
         idx,
         key,
         keysize,
         size_needed,
-        &r
+        &r,
+        &maybe_free
         );
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
     resource_assert(r);
     r->type = LE_CLEAN;
     r->u.clean.vallen = valsize;
......
......@@ -96,13 +96,18 @@ le_add_to_bn(bn_data* bn, uint32_t idx, const char *key, int keysize, const cha
 {
     LEAFENTRY r = NULL;
     uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+    void *maybe_free = nullptr;
     bn->get_space_for_insert(
         idx,
         key,
         keysize,
         size_needed,
-        &r
+        &r,
+        &maybe_free
         );
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
     resource_assert(r);
     r->type = LE_CLEAN;
     r->u.clean.vallen = valsize;
......@@ -113,14 +118,19 @@ static void
 le_overwrite(bn_data* bn, uint32_t idx, const char *key, int keysize, const char *val, int valsize) {
     LEAFENTRY r = NULL;
     uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+    void *maybe_free = nullptr;
     bn->get_space_for_overwrite(
         idx,
         key,
         keysize,
         size_needed, // old_le_size
         size_needed,
-        &r
+        &r,
+        &maybe_free
         );
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
     resource_assert(r);
     r->type = LE_CLEAN;
     r->u.clean.vallen = valsize;
......
......@@ -213,7 +213,7 @@ test_le_offsets (void) {
 static void
 test_ule_packs_to_nothing (ULE ule) {
     LEAFENTRY le;
-    int r = le_pack(ule, NULL, 0, NULL, 0, 0, &le);
+    int r = le_pack(ule, NULL, 0, NULL, 0, 0, &le, nullptr);
     assert(r==0);
     assert(le==NULL);
 }
......@@ -319,7 +319,7 @@ test_le_pack_committed (void) {
     size_t memsize;
     LEAFENTRY le;
-    int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, &le);
+    int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, &le, nullptr);
     assert(r==0);
     assert(le!=NULL);
     memsize = le_memsize_from_ule(&ule);
......@@ -329,7 +329,7 @@ test_le_pack_committed (void) {
     verify_ule_equal(&ule, &tmp_ule);
     LEAFENTRY tmp_le;
     size_t tmp_memsize;
-    r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, &tmp_le);
+    r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, &tmp_le, nullptr);
     tmp_memsize = le_memsize_from_ule(&tmp_ule);
     assert(r==0);
     assert(tmp_memsize == memsize);
......@@ -377,7 +377,7 @@ test_le_pack_uncommitted (uint8_t committed_type, uint8_t prov_type, int num_pla
     size_t memsize;
     LEAFENTRY le;
-    int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, &le);
+    int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, &le, nullptr);
     assert(r==0);
     assert(le!=NULL);
     memsize = le_memsize_from_ule(&ule);
......@@ -387,7 +387,7 @@ test_le_pack_uncommitted (uint8_t committed_type, uint8_t prov_type, int num_pla
     verify_ule_equal(&ule, &tmp_ule);
     LEAFENTRY tmp_le;
     size_t tmp_memsize;
-    r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, &tmp_le);
+    r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, &tmp_le, nullptr);
     tmp_memsize = le_memsize_from_ule(&tmp_ule);
     assert(r==0);
     assert(tmp_memsize == memsize);
......@@ -448,7 +448,7 @@ test_le_apply(ULE ule_initial, FT_MSG msg, ULE ule_expected) {
     LEAFENTRY le_expected;
     LEAFENTRY le_result;
-    r = le_pack(ule_initial, nullptr, 0, nullptr, 0, 0, &le_initial);
+    r = le_pack(ule_initial, nullptr, 0, nullptr, 0, 0, &le_initial, nullptr);
     CKERR(r);
     size_t result_memsize = 0;
......@@ -467,7 +467,7 @@ test_le_apply(ULE ule_initial, FT_MSG msg, ULE ule_expected) {
     }
     size_t expected_memsize = 0;
-    r = le_pack(ule_expected, nullptr, 0, nullptr, 0, 0, &le_expected);
+    r = le_pack(ule_expected, nullptr, 0, nullptr, 0, 0, &le_expected, nullptr);
     CKERR(r);
     if (le_expected) {
         expected_memsize = leafentry_memsize(le_expected);
......@@ -749,7 +749,7 @@ test_le_apply_messages(void) {
 static bool ule_worth_running_garbage_collection(ULE ule, TXNID oldest_referenced_xid_known) {
     LEAFENTRY le;
-    int r = le_pack(ule, nullptr, 0, nullptr, 0, 0, &le); CKERR(r);
+    int r = le_pack(ule, nullptr, 0, nullptr, 0, 0, &le, nullptr); CKERR(r);
     invariant_notnull(le);
     bool worth_running = toku_le_worth_running_garbage_collection(le, oldest_referenced_xid_known);
     toku_free(le);
......
......@@ -119,13 +119,18 @@ le_add_to_bn(bn_data* bn, uint32_t idx, const char *key, int keysize, const cha
 {
     LEAFENTRY r = NULL;
     uint32_t size_needed = LE_CLEAN_MEMSIZE(valsize);
+    void *maybe_free = nullptr;
     bn->get_space_for_insert(
         idx,
         key,
         keysize,
         size_needed,
-        &r
+        &r,
+        &maybe_free
         );
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
     resource_assert(r);
     r->type = LE_CLEAN;
     r->u.clean.vallen = valsize;
......
......@@ -149,7 +149,8 @@ le_pack(ULE ule, // data to be packed into new leafentry
         void* keyp,
         uint32_t keylen,
         uint32_t old_le_size,
-        LEAFENTRY * const new_leafentry_p // this is what this function creates
+        LEAFENTRY * const new_leafentry_p, // this is what this function creates
+        void **const maybe_free
         );
......
......@@ -242,20 +242,21 @@ static void get_space_for_le(
     uint32_t keylen,
     uint32_t old_le_size,
     size_t size,
-    LEAFENTRY* new_le_space
+    LEAFENTRY* new_le_space,
+    void **const maybe_free
     )
 {
-    if (data_buffer == NULL) {
+    if (data_buffer == nullptr) {
         CAST_FROM_VOIDP(*new_le_space, toku_xmalloc(size));
     }
     else {
         // this means we are overwriting something
         if (old_le_size > 0) {
-            data_buffer->get_space_for_overwrite(idx, keyp, keylen, old_le_size, size, new_le_space);
+            data_buffer->get_space_for_overwrite(idx, keyp, keylen, old_le_size, size, new_le_space, maybe_free);
         }
         // this means we are inserting something new
         else {
-            data_buffer->get_space_for_insert(idx, keyp, keylen, size, new_le_space);
+            data_buffer->get_space_for_insert(idx, keyp, keylen, size, new_le_space, maybe_free);
         }
     }
 }
......@@ -470,23 +471,17 @@ toku_le_apply_msg(FT_MSG msg,
     int64_t newnumbytes = 0;
     uint64_t oldmemsize = 0;
     uint32_t keylen = ft_msg_get_keylen(msg);
-    LEAFENTRY copied_old_le = NULL;
-    size_t old_le_size = old_leafentry ? leafentry_memsize(old_leafentry) : 0;
-    toku::scoped_malloc copied_old_le_buf(old_le_size);
-    if (old_leafentry) {
-        CAST_FROM_VOIDP(copied_old_le, copied_old_le_buf.get());
-        memcpy(copied_old_le, old_leafentry, old_le_size);
-    }
     if (old_leafentry == NULL) {
         msg_init_empty_ule(&ule);
     } else {
         oldmemsize = leafentry_memsize(old_leafentry);
-        le_unpack(&ule, copied_old_le); // otherwise unpack leafentry
+        le_unpack(&ule, old_leafentry); // otherwise unpack leafentry
         oldnumbytes = ule_get_innermost_numbytes(&ule, keylen);
     }
     msg_modify_ule(&ule, msg); // modify unpacked leafentry
     ule_simple_garbage_collection(&ule, oldest_referenced_xid, gc_info);
+    void *maybe_free = nullptr;
     int rval = le_pack(
         &ule, // create packed leafentry
         data_buffer,
......@@ -494,7 +489,8 @@ toku_le_apply_msg(FT_MSG msg,
         ft_msg_get_key(msg), // contract of this function is caller has this set, always
         keylen, // contract of this function is caller has this set, always
         oldmemsize,
-        new_leafentry_p
+        new_leafentry_p,
+        &maybe_free
         );
     invariant_zero(rval);
     if (*new_leafentry_p) {
......@@ -502,6 +498,9 @@ toku_le_apply_msg(FT_MSG msg,
     }
     *numbytes_delta_p = newnumbytes - oldnumbytes;
     ule_cleanup(&ule);
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
 }
 bool toku_le_worth_running_garbage_collection(LEAFENTRY le, TXNID oldest_referenced_xid_known) {
......@@ -557,15 +556,8 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
     ULE_S ule;
     int64_t oldnumbytes = 0;
     int64_t newnumbytes = 0;
-    LEAFENTRY copied_old_le = NULL;
-    size_t old_le_size = old_leaf_entry ? leafentry_memsize(old_leaf_entry) : 0;
-    toku::scoped_malloc copied_old_le_buf(old_le_size);
-    if (old_leaf_entry) {
-        CAST_FROM_VOIDP(copied_old_le, copied_old_le_buf.get());
-        memcpy(copied_old_le, old_leaf_entry, old_le_size);
-    }
-    le_unpack(&ule, copied_old_le);
+    le_unpack(&ule, old_leaf_entry);
     oldnumbytes = ule_get_innermost_numbytes(&ule, keylen);
     uint32_t old_mem_size = leafentry_memsize(old_leaf_entry);
......@@ -580,6 +572,7 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
     ule_try_promote_provisional_outermost(&ule, oldest_possible_live_xid);
     ule_garbage_collect(&ule, snapshot_xids, referenced_xids, live_root_txns);
+    void *maybe_free = nullptr;
     int r = le_pack(
         &ule,
         data_buffer,
......@@ -587,7 +580,8 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
         keyp,
         keylen,
         old_mem_size,
-        new_leaf_entry
+        new_leaf_entry,
+        &maybe_free
         );
     assert(r == 0);
     if (*new_leaf_entry) {
......@@ -595,6 +589,9 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
     }
     *numbytes_delta_p = newnumbytes - oldnumbytes;
     ule_cleanup(&ule);
+    if (maybe_free) {
+        toku_free(maybe_free);
+    }
 }
 /////////////////////////////////////////////////////////////////////////////////
......@@ -901,7 +898,8 @@ le_pack(ULE ule, // data to be packed into new leafentry
         void* keyp,
         uint32_t keylen,
         uint32_t old_le_size,
-        LEAFENTRY * const new_leafentry_p // this is what this function creates
+        LEAFENTRY * const new_leafentry_p, // this is what this function creates
+        void **const maybe_free
         )
 {
     invariant(ule->num_cuxrs > 0);
......@@ -930,7 +928,7 @@ le_pack(ULE ule, // data to be packed into new leafentry
 found_insert:
     memsize = le_memsize_from_ule(ule);
     LEAFENTRY new_leafentry;
-    get_space_for_le(data_buffer, idx, keyp, keylen, old_le_size, memsize, &new_leafentry);
+    get_space_for_le(data_buffer, idx, keyp, keylen, old_le_size, memsize, &new_leafentry, maybe_free);
     //p always points to first unused byte after leafentry we are packing
     uint8_t *p;
......@@ -2393,12 +2391,14 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
     // malloc instead of a mempool. However after supporting upgrade,
     // we need to use mempools and the OMT.
     rval = le_pack(&ule, // create packed leafentry
-                   NULL,
+                   nullptr,
                    0, //only matters if we are passing in a bn_data
-                   NULL, //only matters if we are passing in a bn_data
+                   nullptr, //only matters if we are passing in a bn_data
                    0, //only matters if we are passing in a bn_data
                    0, //only matters if we are passing in a bn_data
-                   new_leafentry_p);
+                   new_leafentry_p,
+                   nullptr //only matters if we are passing in a bn_data
+                   );
     ule_cleanup(&ule);
     *new_leafentry_memorysize = leafentry_memsize(*new_leafentry_p);
     return rval;
......
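
Taken together, the le_pack-level callers now follow this shape, shown here as a simplified, hedged sketch using the names from the diff (example_repack is an illustrative wrapper, not the actual ule.cc code): unpack the old leafentry in place, pack, and only then free the buffer that le_pack may have displaced.

// Sketch only: simplified shape of toku_le_apply_msg / toku_le_garbage_collect
// after this change.  The ULE produced by le_unpack may point into the old
// mempool buffer, so that buffer is released only after packing is finished
// (the "keep mempool around a little longer" from the commit message).
static void example_repack(bn_data *data_buffer, uint32_t idx,
                           void *keyp, uint32_t keylen,
                           LEAFENTRY old_leafentry, LEAFENTRY *new_leafentry_p) {
    ULE_S ule;
    le_unpack(&ule, old_leafentry);          // no defensive copy any more
    uint32_t old_le_size = leafentry_memsize(old_leafentry);
    // ... modify / garbage-collect the unpacked ule here ...
    void *maybe_free = nullptr;
    int r = le_pack(&ule, data_buffer, idx, keyp, keylen,
                    old_le_size, new_leafentry_p, &maybe_free);
    invariant_zero(r);
    ule_cleanup(&ule);
    if (maybe_free) {
        toku_free(maybe_free);               // displaced mempool buffer, freed last
    }
}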