Commit da099741 authored by John Esmet, committed by Yoni Fogel

fixes #6113 add instrumentation to the locktree, including memory stats, escalation stats, and single txnid optimization stats


git-svn-id: file:///svn/toku/tokudb@53781 c7de825b-a66e-492c-adef-691d508d4ae1
parent 998de3db
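
Illustrative only, not part of this commit: a minimal sketch of reading the new locktree manager statistics that this change exposes. It assumes the LTM_STATUS_S layout added to locktree.h below and an already-created locktree::manager `mgr`; the helper name, the include path, and the printing loop are hypothetical.

#include <cstdio>
#include <cinttypes>
#include "locktree.h"   // include path may differ depending on the build setup

static void dump_ltm_status(toku::locktree::manager &mgr) {
    LTM_STATUS_S ltmstat;
    mgr.get_status(&ltmstat);
    for (int i = 0; i < LTM_STATUS_NUM_ROWS; i++) {
        const TOKU_ENGINE_STATUS_ROW_S &row = ltmstat.status[i];
        // keyname, value.num and legend are filled in by the STATUS_SET macro
        // in manager.cc; TOKUTIME rows also keep their raw value in value.num.
        printf("%s: %" PRIu64 " (%s)\n", row.keyname, row.value.num, row.legend);
    }
}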
@@ -48,6 +48,8 @@ void locktree::create(manager::memory_tracker *mem_tracker, DICTIONARY_ID dict_i
m_sto_txnid = TXNID_NONE;
m_sto_buffer.create();
m_sto_score = STO_SCORE_THRESHOLD;
m_sto_end_early_count = 0;
m_sto_end_early_time = 0;
m_lock_request_info.pending_lock_requests.create();
ZERO_STRUCT(m_lock_request_info.mutex);
@@ -169,12 +171,22 @@ void locktree::sto_end(void) {
m_sto_txnid = TXNID_NONE;
}
void locktree::sto_end_early(void *prepared_lkr) {
void locktree::sto_end_early_no_accounting(void *prepared_lkr) {
sto_migrate_buffer_ranges_to_tree(prepared_lkr);
sto_end();
m_sto_score = 0;
}
void locktree::sto_end_early(void *prepared_lkr) {
m_sto_end_early_count++;
tokutime_t t0 = toku_time_now();
sto_end_early_no_accounting(prepared_lkr);
tokutime_t t1 = toku_time_now();
m_sto_end_early_time += (t1 - t0);
}
void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) {
// There should be something to migrate, and nothing in the rangetree.
invariant(!m_sto_buffer.is_empty());
@@ -547,7 +559,9 @@ void locktree::escalate(manager::lt_escalate_cb after_escalate_callback, void *a
// if you have to run escalation, you probably don't care about
// the optimization anyway, and this makes things easier.
if (m_sto_txnid != TXNID_NONE) {
sto_end_early(&lkr);
// We are already accounting for this escalation time and
// count, so don't do it for sto_end_early too.
sto_end_early_no_accounting(&lkr);
}
// extract and remove batches of row locks from the locktree
@@ -8,6 +8,7 @@
#define TOKU_LOCKTREE_H
#include <db.h>
#include <toku_time.h>
#include <toku_pthread.h>
#include <ft/fttypes.h>
@@ -19,6 +20,25 @@
#include "wfg.h"
#include "range_buffer.h"
enum {
LTM_SIZE_CURRENT = 0,
LTM_SIZE_LIMIT,
LTM_ESCALATION_COUNT,
LTM_ESCALATION_TIME,
LTM_ESCALATION_LATEST_RESULT,
LTM_NUM_LOCKTREES,
LTM_LOCK_REQUESTS_PENDING,
LTM_STO_NUM_ELIGIBLE,
LTM_STO_END_EARLY_COUNT,
LTM_STO_END_EARLY_TIME,
LTM_STATUS_NUM_ROWS
};
typedef struct {
bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[LTM_STATUS_NUM_ROWS];
} LTM_STATUS_S, *LTM_STATUS;
namespace toku {
class lock_request;
@@ -175,6 +195,8 @@ class locktree {
// deterministically trigger lock escalation.
void run_escalation_for_test(void);
void get_status(LTM_STATUS status);
private:
static const uint64_t DEFAULT_MAX_LOCK_MEMORY = 64L * 1024 * 1024;
static const uint64_t DEFAULT_LOCK_WAIT_TIME = 0;
@@ -184,6 +206,11 @@ class locktree {
uint64_t m_current_lock_memory;
memory_tracker m_mem_tracker;
// statistics about lock escalation.
uint64_t m_escalation_count;
tokutime_t m_escalation_time;
uint64_t m_escalation_latest_result;
// lock wait time for blocking row locks, in ms
uint64_t m_lock_wait_time_ms;
@@ -331,6 +358,10 @@ class locktree {
static const int STO_SCORE_THRESHOLD = 100;
int m_sto_score;
// statistics about time spent ending the STO early
uint64_t m_sto_end_early_count;
tokutime_t m_sto_end_early_time;
// effect: begins the single txnid optimization, setting m_sto_txnid
// to the given txnid.
// requires: m_sto_txnid is invalid
@@ -352,6 +383,7 @@ class locktree {
// sto_score back to zero.
// requires: m_sto_txnid is valid
void sto_end_early(void *prepared_lkr);
void sto_end_early_no_accounting(void *prepared_lkr);
// params: prepared_lkr is a void * to a prepared locked keyrange. we can't use
// the real type because the compiler won't allow us to forward declare
@@ -15,6 +15,9 @@ namespace toku {
void locktree::manager::create(lt_create_cb create_cb, lt_destroy_cb destroy_cb, lt_escalate_cb escalate_cb, void *escalate_extra) {
m_max_lock_memory = DEFAULT_MAX_LOCK_MEMORY;
m_current_lock_memory = 0;
m_escalation_count = 0;
m_escalation_time = 0;
m_escalation_latest_result = 0;
m_lock_wait_time_ms = DEFAULT_LOCK_WAIT_TIME;
m_mem_tracker.set_manager(this);
@@ -216,6 +219,7 @@ void locktree::manager::run_escalation(void) {
// doing so would require some layering hackery (or a callback)
// and more complicated locking. for now, just escalate each
// locktree individually, in-place.
tokutime_t t0 = toku_time_now();
size_t num_locktrees = m_locktree_map.size();
for (size_t i = 0; i < num_locktrees; i++) {
locktree *lt;
@@ -223,6 +227,11 @@ void locktree::manager::run_escalation(void) {
invariant_zero(r);
lt->escalate(m_lt_escalate_callback, m_lt_escalate_callback_extra);
}
tokutime_t t1 = toku_time_now();
m_escalation_count++;
m_escalation_time += (t1 - t0);
m_escalation_latest_result = m_current_lock_memory;
}
void locktree::manager::memory_tracker::set_manager(manager *mgr) {
@@ -260,4 +269,50 @@ bool locktree::manager::memory_tracker::out_of_locks(void) const {
return m_mgr->m_current_lock_memory >= m_mgr->m_max_lock_memory;
}
#define STATUS_SET(s, k, t, n, l) \
s->status[k].keyname = #k; \
s->status[k].type = t; \
s->status[k].value.num = n; \
s->status[k].legend = "locktree: " l;
void locktree::manager::get_status(LTM_STATUS status) {
STATUS_SET(status, LTM_SIZE_CURRENT, UINT64, m_current_lock_memory, "memory size");
STATUS_SET(status, LTM_SIZE_LIMIT, UINT64, m_max_lock_memory, "memory size limit");
STATUS_SET(status, LTM_ESCALATION_COUNT, UINT64, m_escalation_count, "number of times lock escalation ran");
STATUS_SET(status, LTM_ESCALATION_TIME, TOKUTIME, m_escalation_time, "time spent running escalation (seconds)");
STATUS_SET(status, LTM_ESCALATION_LATEST_RESULT, UINT64, m_escalation_latest_result, "latest post-escalation memory size");
mutex_lock();
uint64_t lock_requests_pending = 0;
uint64_t sto_num_eligible = 0;
uint64_t sto_end_early_count = 0;
tokutime_t sto_end_early_time = 0;
size_t num_locktrees = m_locktree_map.size();
for (size_t i = 0; i < num_locktrees; i++) {
locktree *lt;
int r = m_locktree_map.fetch(i, &lt);
invariant_zero(r);
toku_mutex_lock(&lt->m_lock_request_info.mutex);
lock_requests_pending += lt->get_lock_request_info()->pending_lock_requests.size();
toku_mutex_unlock(&lt->m_lock_request_info.mutex);
sto_num_eligible += lt->sto_txnid_is_valid_unsafe() ? 1 : 0;
sto_end_early_count += lt->m_sto_end_early_count;
sto_end_early_time += lt->m_sto_end_early_time;
}
mutex_unlock();
STATUS_SET(status, LTM_NUM_LOCKTREES, UINT64, num_locktrees, "number of locktrees open now");
STATUS_SET(status, LTM_LOCK_REQUESTS_PENDING, UINT64, lock_requests_pending, "number of pending lock requests");
STATUS_SET(status, LTM_STO_NUM_ELIGIBLE, UINT64, sto_num_eligible, "number of locktrees eligible for the STO");
STATUS_SET(status, LTM_STO_END_EARLY_COUNT, UINT64, sto_end_early_count, "number of times a locktree ended the STO early");
STATUS_SET(status, LTM_STO_END_EARLY_TIME, TOKUTIME, sto_end_early_time, "time spent ending the STO early");
}
#undef STATUS_SET
} /* namespace toku */
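
For clarity, each STATUS_SET invocation above expands in place; the LTM_SIZE_CURRENT row, for example, is roughly equivalent to the following (a sketch of the macro expansion, using only the fields assigned in the macro body):

status->status[LTM_SIZE_CURRENT].keyname = "LTM_SIZE_CURRENT";
status->status[LTM_SIZE_CURRENT].type = UINT64;
status->status[LTM_SIZE_CURRENT].value.num = m_current_lock_memory;
status->status[LTM_SIZE_CURRENT].legend = "locktree: memory size";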
@@ -26,6 +26,8 @@ void locktree_unit_test::test_create_destroy(void) {
invariant(lt->m_rangetree != nullptr);
invariant(lt->m_userdata == nullptr);
invariant(info->pending_lock_requests.size() == 0);
invariant(lt->m_sto_end_early_count == 0);
invariant(lt->m_sto_end_early_time == 0);
mgr.release_lt(lt);
mgr.destroy();
@@ -21,6 +21,9 @@ void manager_unit_test::test_create_destroy(void) {
invariant(mgr.m_max_lock_memory == locktree::manager::DEFAULT_MAX_LOCK_MEMORY);
invariant(mgr.m_current_lock_memory == 0);
invariant(mgr.m_escalation_count == 0);
invariant(mgr.m_escalation_time == 0);
invariant(mgr.m_escalation_latest_result == 0);
invariant(mgr.m_lock_wait_time_ms == locktree::manager::DEFAULT_LOCK_WAIT_TIME);
invariant(mgr.m_locktree_map.size() == 0);
@@ -1781,10 +1781,7 @@ env_get_engine_status_num_rows (DB_ENV * UU(env), uint64_t * num_rowsp) {
num_rows += LE_STATUS_NUM_ROWS;
num_rows += CP_STATUS_NUM_ROWS;
num_rows += CT_STATUS_NUM_ROWS;
// TODO: 5416 determine necessary locktree statistics
#if 0
num_rows += LTM_STATUS_NUM_ROWS;
#endif
num_rows += FT_STATUS_NUM_ROWS;
num_rows += FT_FLUSHER_STATUS_NUM_ROWS;
num_rows += FT_HOT_STATUS_NUM_ROWS;
@@ -1873,6 +1870,13 @@ env_get_engine_status (DB_ENV * env, TOKU_ENGINE_STATUS_ROW engstat, uint64_t ma
engstat[row++] = ctstat.status[i];
}
}
{
LTM_STATUS_S ltmstat;
env->i->ltm.get_status(&ltmstat);
for (int i = 0; i < LTM_STATUS_NUM_ROWS && row < maxrows; i++) {
engstat[row++] = ltmstat.status[i];
}
}
{
FT_STATUS_S ftstat;
toku_ft_get_status(&ftstat);