Commit 4af43d5a authored by Leif Walsh, committed by Yoni Fogel

[t:3988] fixing changes I blew away with commit 36862

git-svn-id: file:///svn/toku/tokudb@36918 c7de825b-a66e-492c-adef-691d508d4ae1
parent fbfbf4af
......@@ -653,6 +653,7 @@ int toku_pin_brtnode (BRT brt, BLOCKNUM blocknum, u_int32_t fullhash,
UNLOCKERS unlockers,
ANCESTORS ancestors, struct pivot_bounds const * const pbounds,
struct brtnode_fetch_extra *bfe,
+ BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
BRTNODE *node_p)
__attribute__((__warn_unused_result__));
void toku_pin_brtnode_holding_lock (BRT brt, BLOCKNUM blocknum, u_int32_t fullhash,
......@@ -748,6 +749,12 @@ typedef struct brt_status {
uint64_t search_root_retries; // number of searches that required the root node to be fetched more than once
uint64_t search_tries_gt_height; // number of searches that required more tries than the height of the tree
uint64_t search_tries_gt_heightplus3; // number of searches that required more tries than the height of the tree plus three
+ uint64_t disk_flush_leaf; // number of leaf nodes flushed to disk, not for checkpoint
+ uint64_t disk_flush_nonleaf; // number of nonleaf nodes flushed to disk, not for checkpoint
+ uint64_t disk_flush_leaf_for_checkpoint; // number of leaf nodes flushed to disk for checkpoint
+ uint64_t disk_flush_nonleaf_for_checkpoint; // number of nonleaf nodes flushed to disk for checkpoint
+ uint64_t destroy_leaf; // number of leaf nodes destroyed
+ uint64_t destroy_nonleaf; // number of nonleaf nodes destroyed
uint64_t cleaner_total_nodes; // total number of nodes whose buffers are potentially flushed by cleaner thread
uint64_t cleaner_h1_nodes; // number of nodes of height one whose message buffers are flushed by cleaner thread
uint64_t cleaner_hgt1_nodes; // number of nodes of height > 1 whose message buffers are flushed by cleaner thread
......
......@@ -289,6 +289,7 @@ int toku_pin_brtnode (BRT brt, BLOCKNUM blocknum, u_int32_t fullhash,
UNLOCKERS unlockers,
ANCESTORS ancestors, struct pivot_bounds const * const bounds,
struct brtnode_fetch_extra *bfe,
+ BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
BRTNODE *node_p) {
void *node_v;
int r = toku_cachetable_get_and_pin_nonblocking(
......@@ -309,7 +310,9 @@ int toku_pin_brtnode (BRT brt, BLOCKNUM blocknum, u_int32_t fullhash,
unlockers);
if (r==0) {
BRTNODE node = node_v;
+ if (apply_ancestor_messages) {
maybe_apply_ancestors_messages_to_node(brt, node, ancestors, bounds);
+ }
*node_p = node;
// printf("%*sPin %ld\n", 8-node->height, "", blocknum.b);
} else {
......@@ -494,7 +497,7 @@ long
toku_bnc_memory_size(NONLEAF_CHILDINFO bnc)
{
return (sizeof(*bnc) +
- toku_fifo_memory_size(bnc->buffer) +
+ toku_fifo_memory_size_in_use(bnc->buffer) +
toku_omt_memory_size(bnc->fresh_message_tree) +
toku_omt_memory_size(bnc->stale_message_tree) +
toku_omt_memory_size(bnc->broadcast_list));
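The hunk above changes the buffer accounting in toku_bnc_memory_size from the fifo's full footprint to only the portion in use. The fifo itself is not part of this diff, so the following is a minimal sketch of the presumed distinction, assuming a resizable buffer that tracks allocated capacity separately from the bytes occupied by queued messages; the toy_* names and fields are hypothetical, and only the two accessor names above come from the tree.

#include <stddef.h>
#include <stdlib.h>

/* Sketch only: a guess at how the two accessors could differ.  The real
 * toku_fifo implementation may account differently. */
struct toy_fifo {
    char  *buf;
    size_t capacity;   /* bytes allocated for the message buffer */
    size_t used;       /* bytes actually occupied by enqueued messages */
};

static size_t toy_fifo_memory_size(struct toy_fifo *f) {
    return sizeof(*f) + f->capacity;   /* whole allocation */
}

static size_t toy_fifo_memory_size_in_use(struct toy_fifo *f) {
    return sizeof(*f) + f->used;       /* only what the messages occupy */
}

int main(void) {
    struct toy_fifo f = { .buf = malloc(4096), .capacity = 4096, .used = 100 };
    /* under the in-use accounting, the memory size reported for the node
     * would reflect 100 bytes of buffered messages rather than the
     * 4096-byte allocation */
    size_t footprint = toy_fifo_memory_size(&f);
    size_t in_use    = toy_fifo_memory_size_in_use(&f);
    free(f.buf);
    return (footprint >= in_use) ? 0 : 1;
}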
......@@ -674,6 +677,7 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename
struct brt_header *h = extraargs;
BRTNODE brtnode = brtnode_v;
assert(brtnode->thisnodename.b==nodename.b);
+ int height = brtnode->height;
//printf("%s:%d %p->mdict[0]=%p\n", __FILE__, __LINE__, brtnode, brtnode->mdicts[0]);
if (write_me) {
if (!h->panic) { // if the brt panicked, stop writing, otherwise try to write it.
......@@ -692,6 +696,18 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename
}
}
}
+ if (height == 0) { // statistics incremented only when disk I/O is done, so worth the threadsafe count
+ if (for_checkpoint)
+ (void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_leaf_for_checkpoint);
+ else
+ (void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_leaf);
+ }
+ else {
+ if (for_checkpoint)
+ (void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_nonleaf_for_checkpoint);
+ else
+ (void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_nonleaf);
+ }
}
//printf("%s:%d %p->mdict[0]=%p\n", __FILE__, __LINE__, brtnode, brtnode->mdicts[0]);
*new_size = make_brtnode_pair_attr(brtnode);
......@@ -1160,6 +1176,9 @@ void toku_brtnode_free (BRTNODE *nodep) {
toku_mempool_destroy(mp);
}
}
+ toku_sync_fetch_and_increment_uint64(&brt_status.destroy_leaf);
+ } else {
+ toku_sync_fetch_and_increment_uint64(&brt_status.destroy_nonleaf);
}
toku_destroy_brtnode_internals(node);
toku_free(node);
......@@ -2177,7 +2196,7 @@ static int do_update(brt_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE b
if (cmd->type == BRT_UPDATE) {
// key is passed in with command (should be same as from le)
// update function extra is passed in with command
- add_to_brt_status(&brt_status.updates,1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.updates);
keyp = cmd->u.id.key;
update_function_extra = cmd->u.id.val;
} else if (cmd->type == BRT_UPDATE_BROADCAST_ALL) {
......@@ -2186,7 +2205,7 @@ static int do_update(brt_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE b
assert(le); // for broadcast updates, we just hit all leafentries
// so this cannot be null
assert(cmd->u.id.key->size == 0);
- add_to_brt_status(&brt_status.updates_broadcast,1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.updates_broadcast);
keyp = toku_fill_dbt(&key, le_key(le), le_keylen(le));
update_function_extra = cmd->u.id.val;
} else {
......@@ -3180,22 +3199,22 @@ maybe_destroy_child_blbs(BRTNODE node, BRTNODE child)
static void
update_flush_status(BRTNODE UU(parent), BRTNODE child, int cascades)
{
- __sync_fetch_and_add(&brt_status.flush_total, 1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_total);
if (cascades > 0) {
- __sync_fetch_and_add(&brt_status.flush_cascades, 1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades);
switch (cascades) {
case 1:
- __sync_fetch_and_add(&brt_status.flush_cascades_1, 1); break;
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_1); break;
case 2:
- __sync_fetch_and_add(&brt_status.flush_cascades_2, 1); break;
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_2); break;
case 3:
- __sync_fetch_and_add(&brt_status.flush_cascades_3, 1); break;
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_3); break;
case 4:
- __sync_fetch_and_add(&brt_status.flush_cascades_4, 1); break;
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_4); break;
case 5:
- __sync_fetch_and_add(&brt_status.flush_cascades_5, 1); break;
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_5); break;
default:
- __sync_fetch_and_add(&brt_status.flush_cascades_gt_5, 1); break;
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_gt_5); break;
}
}
bool flush_needs_io = false;
......@@ -3205,9 +3224,9 @@ update_flush_status(BRTNODE UU(parent), BRTNODE child, int cascades)
}
}
if (flush_needs_io) {
- __sync_fetch_and_add(&brt_status.flush_needed_io, 1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_needed_io);
} else {
- __sync_fetch_and_add(&brt_status.flush_in_memory, 1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.flush_in_memory);
}
}
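The hunks above and below replace raw __sync_fetch_and_add calls and the older add_to_brt_status helper with toku_sync_fetch_and_increment_uint64. Its definition is not part of this diff; a minimal sketch of what such a wrapper plausibly looks like, assuming it simply forwards to the GCC builtin that the old code called directly (the real definition elsewhere in the tree may differ):

#include <stdint.h>

/* Sketch only: a thin atomic-increment wrapper matching the call sites in
 * this commit, guessed from the __sync_fetch_and_add calls it replaces. */
static inline uint64_t
toku_sync_fetch_and_increment_uint64(volatile uint64_t *p) {
    return __sync_fetch_and_add(p, 1);
}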
......@@ -3473,7 +3492,7 @@ void toku_apply_cmd_to_leaf(
snapshot_txnids,
live_list_reverse);
} else {
- add_to_brt_status(&brt_status.msn_discards,1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.msn_discards);
}
}
}
......@@ -3496,7 +3515,7 @@ void toku_apply_cmd_to_leaf(
live_list_reverse);
if (bn_made_change) *made_change = 1;
} else {
- add_to_brt_status(&brt_status.msn_discards,1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.msn_discards);
}
}
}
......@@ -5795,7 +5814,7 @@ do_brt_leaf_put_cmd(BRT t, BASEMENTNODE bn, BRTNODE ancestor, int childnum, OMT
}
brt_leaf_put_cmd(t->compare_fun, t->update_fun, &t->h->descriptor, bn, &brtcmd, &made_change, &BP_WORKDONE(ancestor, childnum), snapshot_txnids, live_list_reverse);
} else {
- add_to_brt_status(&brt_status.msn_discards,1);
+ toku_sync_fetch_and_increment_uint64(&brt_status.msn_discards);
}
}
......@@ -6313,6 +6332,7 @@ brt_search_child(BRT brt, BRTNODE node, int childnum, brt_search_t *search, BRT_
unlockers,
&next_ancestors, bounds,
&bfe,
+ TRUE,
&childnode);
if (rr==TOKUDB_TRY_AGAIN) return rr;
assert(rr==0);
......@@ -6553,7 +6573,7 @@ try_again:
brtcursor->left_is_neg_infty,
brtcursor->right_is_pos_infty
);
- r = toku_pin_brtnode(brt, *rootp, fullhash,(UNLOCKERS)NULL,(ANCESTORS)NULL, &infinite_bounds, &bfe, &node);
+ r = toku_pin_brtnode(brt, *rootp, fullhash,(UNLOCKERS)NULL,(ANCESTORS)NULL, &infinite_bounds, &bfe, TRUE, &node);
assert(r==0 || r== TOKUDB_TRY_AGAIN);
if (r == TOKUDB_TRY_AGAIN) {
root_tries++;
......@@ -7107,7 +7127,7 @@ static int toku_brt_keyrange_internal (BRT brt, BRTNODE node,
BLOCKNUM childblocknum = BP_BLOCKNUM(node, child_number);
u_int32_t fullhash = compute_child_fullhash(brt->cf, node, child_number);
BRTNODE childnode;
- r = toku_pin_brtnode(brt, childblocknum, fullhash, unlockers, &next_ancestors, bounds, bfe, &childnode);
+ r = toku_pin_brtnode(brt, childblocknum, fullhash, unlockers, &next_ancestors, bounds, bfe, FALSE, &childnode);
if (r!=TOKUDB_TRY_AGAIN) {
assert(r==0);
struct unlock_brtnode_extra unlock_extra = {brt,childnode};
......@@ -7155,7 +7175,7 @@ int toku_brt_keyrange (BRT brt, DBT *key, u_int64_t *less_p, u_int64_t *equal_p,
BRTNODE node;
{
- int r = toku_pin_brtnode(brt, *rootp, fullhash,(UNLOCKERS)NULL,(ANCESTORS)NULL, &infinite_bounds, &bfe, &node);
+ int r = toku_pin_brtnode(brt, *rootp, fullhash,(UNLOCKERS)NULL,(ANCESTORS)NULL, &infinite_bounds, &bfe, FALSE, &node);
assert(r==0 || r== TOKUDB_TRY_AGAIN);
if (r == TOKUDB_TRY_AGAIN) {
goto try_again;
......
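Taken together, the call-site changes give the new apply_ancestor_messages flag a clear split: the search and cursor paths above pin with TRUE so buffered ancestor messages are applied to the node before it is read, while toku_brt_keyrange pins with FALSE because, per the #3972 comment, a range estimate does not need that work yet. A minimal self-contained sketch of the pattern follows; every name in it is a hypothetical stand-in, not code from this tree.

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: the shape of an "apply ancestor messages on pin" flag.
 * Exact reads fold buffered parent work into the node first; estimates
 * skip it and accept a slightly stale view. */
struct toy_node {
    int buffered;   /* messages still sitting in ancestor buffers */
    int rows;       /* rows visible in the node itself */
};

static void toy_pin(struct toy_node *n, bool apply_ancestor_messages) {
    if (apply_ancestor_messages) {
        n->rows += n->buffered;   /* search path: pay the cost, read exact data */
        n->buffered = 0;
    }
    /* keyrange path: leave the messages buffered; the count stays approximate */
}

int main(void) {
    struct toy_node search_view   = { .buffered = 3, .rows = 10 };
    struct toy_node keyrange_view = { .buffered = 3, .rows = 10 };
    toy_pin(&search_view, true);     /* exact read: sees 13 rows */
    toy_pin(&keyrange_view, false);  /* estimate: still sees 10 rows */
    printf("exact=%d estimate=%d\n", search_view.rows, keyrange_view.rows);
    return 0;
}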