Commit 3e2b3f89 authored by John Esmet's avatar John Esmet

fixes #206 Factor out inner classes to be their own classes. Relax some

abstractions and strengthen others, with an eye for simplicity and
consistency, not over-encapsulation.
parent c1cc6c36
......@@ -338,7 +338,7 @@ int lock_request::retry(void) {
}
void lock_request::retry_all_lock_requests(locktree *lt) {
locktree::lt_lock_request_info *info = lt->get_lock_request_info();
lt_lock_request_info *info = lt->get_lock_request_info();
// if a thread reads this bit to be true, then it should go ahead and
// take the locktree mutex and retry lock requests. we use this bit
......
......@@ -202,7 +202,7 @@ class lock_request {
// the lock request info state stored in the
// locktree that this lock request is for.
struct locktree::lt_lock_request_info *m_info;
struct lt_lock_request_info *m_info;
// effect: tries again to acquire the lock described by this lock request
// returns: 0 if retrying the request succeeded and is now complete
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -117,13 +117,10 @@ static int my_killed_callback(void) {
// make sure deadlocks are detected when a lock request starts
void lock_request_unit_test::test_wait_time_callback(void) {
int r;
locktree::manager mgr;
locktree *lt;
mgr.create(nullptr, nullptr, nullptr, nullptr);
locktree lt;
DICTIONARY_ID dict_id = { 1 };
lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
TXNID txnid_a = 1001;
lock_request request_a;
......@@ -136,12 +133,12 @@ void lock_request_unit_test::test_wait_time_callback(void) {
const DBT *one = get_dbt(1);
// a locks 'one'
request_a.set(lt, txnid_a, one, one, lock_request::type::WRITE, false);
request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
r = request_a.start();
assert_zero(r);
// b tries to lock 'one'
request_b.set(lt, txnid_b, one, one, lock_request::type::WRITE, false);
request_b.set(&lt, txnid_b, one, one, lock_request::type::WRITE, false);
r = request_b.start();
assert(r == DB_LOCK_NOTGRANTED);
......@@ -162,11 +159,9 @@ void lock_request_unit_test::test_wait_time_callback(void) {
request_b.destroy();
release_lock_and_retry_requests(lt, txnid_a, one, one);
release_lock_and_retry_requests(&lt, txnid_a, one, one);
request_a.destroy();
mgr.release_lt(lt);
mgr.destroy();
lt.create(nullptr, dict_id, nullptr, compare_dbts);
}
} /* namespace toku */
......
......@@ -114,13 +114,10 @@ static int my_killed_callback(void) {
// make sure deadlocks are detected when a lock request starts
void lock_request_unit_test::test_wait_time_callback(void) {
int r;
locktree::manager mgr;
locktree *lt;
mgr.create(nullptr, nullptr, nullptr, nullptr);
locktree lt;
DICTIONARY_ID dict_id = { 1 };
lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
TXNID txnid_a = 1001;
lock_request request_a;
......@@ -133,12 +130,12 @@ void lock_request_unit_test::test_wait_time_callback(void) {
const DBT *one = get_dbt(1);
// a locks 'one'
request_a.set(lt, txnid_a, one, one, lock_request::type::WRITE, false);
request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
r = request_a.start();
assert_zero(r);
// b tries to lock 'one'
request_b.set(lt, txnid_b, one, one, lock_request::type::WRITE, false);
request_b.set(&lt, txnid_b, one, one, lock_request::type::WRITE, false);
r = request_b.start();
assert(r == DB_LOCK_NOTGRANTED);
......@@ -158,11 +155,8 @@ void lock_request_unit_test::test_wait_time_callback(void) {
request_b.destroy();
release_lock_and_retry_requests(lt, txnid_a, one, one);
release_lock_and_retry_requests(&lt, txnid_a, one, one);
request_a.destroy();
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -96,14 +96,13 @@ namespace toku {
// make sure deadlocks are detected when a lock request starts
void lock_request_unit_test::test_start_deadlock(void) {
int r;
locktree::manager mgr;
locktree *lt;
locktree lt;
// something short
const uint64_t lock_wait_time = 10;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DICTIONARY_ID dict_id = { 1 };
lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
TXNID txnid_a = 1001;
TXNID txnid_b = 2001;
......@@ -119,30 +118,30 @@ void lock_request_unit_test::test_start_deadlock(void) {
const DBT *two = get_dbt(2);
// start and succeed 1,1 for A and 2,2 for B.
request_a.set(lt, txnid_a, one, one, lock_request::type::WRITE, false);
request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
r = request_a.start();
invariant_zero(r);
request_b.set(lt, txnid_b, two, two, lock_request::type::WRITE, false);
request_b.set(&lt, txnid_b, two, two, lock_request::type::WRITE, false);
r = request_b.start();
invariant_zero(r);
// txnid A should not be granted a lock on 2,2, so it goes pending.
request_a.set(lt, txnid_a, two, two, lock_request::type::WRITE, false);
request_a.set(&lt, txnid_a, two, two, lock_request::type::WRITE, false);
r = request_a.start();
invariant(r == DB_LOCK_NOTGRANTED);
// if txnid B wants a lock on 1,1 it should deadlock with A
request_b.set(lt, txnid_b, one, one, lock_request::type::WRITE, false);
request_b.set(&lt, txnid_b, one, one, lock_request::type::WRITE, false);
r = request_b.start();
invariant(r == DB_LOCK_DEADLOCK);
// txnid C should not deadlock on either of these - it should just time out.
request_c.set(lt, txnid_c, one, one, lock_request::type::WRITE, false);
request_c.set(&lt, txnid_c, one, one, lock_request::type::WRITE, false);
r = request_c.start();
invariant(r == DB_LOCK_NOTGRANTED);
r = request_c.wait(lock_wait_time);
invariant(r == DB_LOCK_NOTGRANTED);
request_c.set(lt, txnid_c, two, two, lock_request::type::WRITE, false);
request_c.set(&lt, txnid_c, two, two, lock_request::type::WRITE, false);
r = request_c.start();
invariant(r == DB_LOCK_NOTGRANTED);
r = request_c.wait(lock_wait_time);
......@@ -150,17 +149,15 @@ void lock_request_unit_test::test_start_deadlock(void) {
// release locks for A and B, then wait on A's request which should succeed
// since B just unlocked and should have completed A's pending request.
release_lock_and_retry_requests(lt, txnid_a, one, one);
release_lock_and_retry_requests(lt, txnid_b, two, two);
release_lock_and_retry_requests(&lt, txnid_a, one, one);
release_lock_and_retry_requests(&lt, txnid_b, two, two);
r = request_a.wait(lock_wait_time);
invariant_zero(r);
release_lock_and_retry_requests(lt, txnid_a, two, two);
release_lock_and_retry_requests(&lt, txnid_a, two, two);
request_a.destroy();
request_b.destroy();
request_c.destroy();
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -97,13 +97,11 @@ namespace toku {
// stored in the lock request set as pending.
void lock_request_unit_test::test_start_pending(void) {
int r;
locktree::manager mgr;
locktree *lt;
locktree lt;
lock_request request;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DICTIONARY_ID dict_id = { 1 };
lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
TXNID txnid_a = 1001;
TXNID txnid_b = 2001;
......@@ -113,15 +111,15 @@ void lock_request_unit_test::test_start_pending(void) {
const DBT *two = get_dbt(2);
// take a range lock using txnid b
r = lt->acquire_write_lock(txnid_b, zero, two, nullptr, false);
r = lt.acquire_write_lock(txnid_b, zero, two, nullptr, false);
invariant_zero(r);
locktree::lt_lock_request_info *info = lt->get_lock_request_info();
lt_lock_request_info *info = lt.get_lock_request_info();
// start a lock request for 1,1
// it should fail. the request should be stored and in the pending state.
request.create();
request.set(lt, txnid_a, one, one, lock_request::type::WRITE, false);
request.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
r = request.start();
invariant(r == DB_LOCK_NOTGRANTED);
invariant(info->pending_lock_requests.size() == 1);
......@@ -134,20 +132,18 @@ void lock_request_unit_test::test_start_pending(void) {
invariant(compare_dbts(nullptr, &request.m_right_key_copy, one) == 0);
// release the range lock for txnid b
locktree_unit_test::locktree_test_release_lock(lt, txnid_b, zero, two);
locktree_unit_test::locktree_test_release_lock(&lt, txnid_b, zero, two);
// now retry the lock requests.
// it should transition the request to successfully complete.
lock_request::retry_all_lock_requests(lt);
lock_request::retry_all_lock_requests(&lt);
invariant(info->pending_lock_requests.size() == 0);
invariant(request.m_state == lock_request::state::COMPLETE);
invariant(request.m_complete_r == 0);
locktree_unit_test::locktree_test_release_lock(lt, txnid_a, one, one);
locktree_unit_test::locktree_test_release_lock(&lt, txnid_a, one, one);
request.destroy();
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -98,12 +98,10 @@ static const uint64_t my_lock_wait_time = 10 * 1000; // 10 sec
// make sure deadlocks are detected when a lock request starts
void lock_request_unit_test::test_wait_time_callback(void) {
int r;
locktree::manager mgr;
locktree *lt;
locktree lt;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DICTIONARY_ID dict_id = { 1 };
lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
TXNID txnid_a = 1001;
lock_request request_a;
......@@ -117,12 +115,12 @@ void lock_request_unit_test::test_wait_time_callback(void) {
const DBT *two = get_dbt(2);
// a locks 'one'
request_a.set(lt, txnid_a, one, one, lock_request::type::WRITE, false);
request_a.set(&lt, txnid_a, one, one, lock_request::type::WRITE, false);
r = request_a.start();
assert_zero(r);
// b tries to lock 'one'
request_b.set(lt, txnid_b, one, two, lock_request::type::WRITE, false);
request_b.set(&lt, txnid_b, one, two, lock_request::type::WRITE, false);
r = request_b.start();
assert(r == DB_LOCK_NOTGRANTED);
uint64_t t_start = toku_current_time_microsec();
......@@ -134,11 +132,8 @@ void lock_request_unit_test::test_wait_time_callback(void) {
assert(t_delta >= my_lock_wait_time);
request_b.destroy();
release_lock_and_retry_requests(lt, txnid_a, one, one);
release_lock_and_retry_requests(&lt, txnid_a, one, one);
request_a.destroy();
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -105,11 +105,10 @@ namespace toku {
// test write lock conflicts when read or write locks exist
// test read lock conflicts when write locks exist
void locktree_unit_test::test_conflicts(void) {
locktree::manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
locktree lt;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
int r;
TXNID txnid_a = 1001;
......@@ -125,8 +124,8 @@ void locktree_unit_test::test_conflicts(void) {
// test_run == 0 means test with read lock
// test_run == 1 means test with write lock
#define ACQUIRE_LOCK(txn, left, right, conflicts) \
test_run == 0 ? lt->acquire_read_lock(txn, left, right, conflicts, false) \
: lt->acquire_write_lock(txn, left, right, conflicts, false)
test_run == 0 ? lt.acquire_read_lock(txn, left, right, conflicts, false) \
: lt.acquire_write_lock(txn, left, right, conflicts, false)
// acquire some locks for txnid_a
r = ACQUIRE_LOCK(txnid_a, one, one, nullptr);
......@@ -142,8 +141,8 @@ void locktree_unit_test::test_conflicts(void) {
// if test_run == 0, then read locks exist. only test write locks.
#define ACQUIRE_LOCK(txn, left, right, conflicts) \
sub_test_run == 0 && test_run == 1 ? \
lt->acquire_read_lock(txn, left, right, conflicts, false) \
: lt->acquire_write_lock(txn, left, right, conflicts, false)
lt.acquire_read_lock(txn, left, right, conflicts, false) \
: lt.acquire_write_lock(txn, left, right, conflicts, false)
// try to get point write locks for txnid_b, should fail
r = ACQUIRE_LOCK(txnid_b, one, one, nullptr);
invariant(r == DB_LOCK_NOTGRANTED);
......@@ -162,13 +161,10 @@ void locktree_unit_test::test_conflicts(void) {
#undef ACQUIRE_LOCK
}
lt->remove_overlapping_locks_for_txnid(txnid_a, one, one);
lt->remove_overlapping_locks_for_txnid(txnid_a, three, four);
invariant(no_row_locks(lt));
lt.remove_overlapping_locks_for_txnid(txnid_a, one, one);
lt.remove_overlapping_locks_for_txnid(txnid_a, three, four);
invariant(no_row_locks(&lt));
}
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -95,27 +95,23 @@ namespace toku {
// test simple create and destroy of the locktree
void locktree_unit_test::test_create_destroy(void) {
locktree::manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
locktree lt;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr);
locktree::lt_lock_request_info *info = lt->get_lock_request_info();
lt.create(nullptr, dict_id, nullptr, compare_dbts);
lt_lock_request_info *info = lt.get_lock_request_info();
invariant_notnull(info);
toku_mutex_lock(&info->mutex);
toku_mutex_unlock(&info->mutex);
invariant(lt->m_dict_id.dictid == dict_id.dictid);
invariant(lt->m_reference_count == 1);
invariant(lt->m_rangetree != nullptr);
invariant(lt->m_userdata == nullptr);
invariant(lt.m_dict_id.dictid == dict_id.dictid);
invariant(lt.m_reference_count == 1);
invariant(lt.m_rangetree != nullptr);
invariant(lt.m_userdata == nullptr);
invariant(info->pending_lock_requests.size() == 0);
invariant(lt->m_sto_end_early_count == 0);
invariant(lt->m_sto_end_early_time == 0);
mgr.release_lt(lt);
mgr.destroy();
invariant(lt.m_sto_end_early_count == 0);
invariant(lt.m_sto_end_early_time == 0);
}
} /* namespace toku */
......
......@@ -119,21 +119,7 @@ static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64
return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
}
#if 0
static locktree **big_txn_lt;
static int n_big_txn_lt;
static int get_locktrees_touched_by_txn(TXNID txn_id UU(), void *txn_extra UU(), locktree ***ret_locktrees, int *ret_num_locktrees) {
locktree **locktrees = (locktree **) toku_malloc(n_big_txn_lt * sizeof (locktree *));
for (int i = 0; i < n_big_txn_lt; i++)
locktrees[i] = big_txn_lt[i];
*ret_locktrees = locktrees;
*ret_num_locktrees = n_big_txn_lt;
return 0;
}
#endif
static void run_big_txn(locktree::manager *mgr UU(), locktree **lt, int n_lt, TXNID txn_id) {
static void run_big_txn(locktree_manager *mgr UU(), locktree **lt, int n_lt, TXNID txn_id) {
int64_t last_i = -1;
for (int64_t i = 0; !killed; i++) {
for (int j = 0; j < n_lt; j++) {
......@@ -157,7 +143,7 @@ static void run_big_txn(locktree::manager *mgr UU(), locktree **lt, int n_lt, TX
}
struct big_arg {
locktree::manager *mgr;
locktree_manager *mgr;
locktree **lt;
int n_lt;
TXNID txn_id;
......@@ -171,7 +157,7 @@ static void *big_f(void *_arg) {
return arg;
}
static void run_small_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t k) {
static void run_small_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t k) {
int64_t i;
for (i = 0; !killed; i++) {
uint64_t t_start = toku_current_time_microsec();
......@@ -190,7 +176,7 @@ static void run_small_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_i
}
struct small_arg {
locktree::manager *mgr;
locktree_manager *mgr;
locktree *lt;
TXNID txn_id;
int64_t k;
......@@ -209,7 +195,7 @@ static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buff
printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
}
static uint64_t get_escalation_count(locktree::manager &mgr) {
static uint64_t get_escalation_count(locktree_manager &mgr) {
LTM_STATUS_S ltm_status;
mgr.get_status(&ltm_status);
......@@ -251,7 +237,7 @@ int main(int argc, const char *argv[]) {
int r;
// create a manager
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, e_callback, nullptr);
mgr.set_max_lock_memory(max_lock_memory);
......@@ -264,11 +250,6 @@ int main(int argc, const char *argv[]) {
big_lt[i] = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
}
#if 0
big_txn_lt = big_lt;
n_big_txn_lt = n_big;
#endif
dict_id = { next_dict_id }; next_dict_id++;
locktree *small_lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
......
......@@ -118,7 +118,7 @@ static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64
return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
}
static void run_big_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t start_i) {
static void run_big_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t start_i) {
fprintf(stderr, "%u run_big_txn %p %" PRIu64 " %" PRId64 "\n", toku_os_gettid(), lt, txn_id, start_i);
int64_t last_i = -1;
for (int64_t i = start_i; !killed; i++) {
......@@ -141,7 +141,7 @@ static void run_big_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id,
}
struct arg {
locktree::manager *mgr;
locktree_manager *mgr;
locktree *lt;
TXNID txn_id;
int64_t start_i;
......@@ -158,7 +158,7 @@ static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buff
printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
}
static uint64_t get_escalation_count(locktree::manager &mgr) {
static uint64_t get_escalation_count(locktree_manager &mgr) {
LTM_STATUS_S ltm_status;
mgr.get_status(&ltm_status);
......@@ -205,7 +205,7 @@ int main(int argc, const char *argv[]) {
int r;
// create a manager
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, e_callback, nullptr);
mgr.set_max_lock_memory(max_lock_memory);
......
......@@ -118,7 +118,7 @@ static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64
return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
}
static void run_big_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t start_i) {
static void run_big_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t start_i) {
fprintf(stderr, "%u run_big_txn %p %" PRIu64 " %" PRId64 "\n", toku_os_gettid(), lt, txn_id, start_i);
int64_t last_i = -1;
for (int64_t i = start_i; !killed; i++) {
......@@ -141,7 +141,7 @@ static void run_big_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id,
}
struct arg {
locktree::manager *mgr;
locktree_manager *mgr;
locktree *lt;
TXNID txn_id;
int64_t start_i;
......@@ -158,7 +158,7 @@ static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buff
printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
}
static uint64_t get_escalation_count(locktree::manager &mgr) {
static uint64_t get_escalation_count(locktree_manager &mgr) {
LTM_STATUS_S ltm_status;
mgr.get_status(&ltm_status);
......@@ -205,7 +205,7 @@ int main(int argc, const char *argv[]) {
int r;
// create a manager
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, e_callback, nullptr);
mgr.set_max_lock_memory(max_lock_memory);
......
......@@ -123,7 +123,7 @@ static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buff
printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
}
static uint64_t get_escalation_count(locktree::manager &mgr) {
static uint64_t get_escalation_count(locktree_manager &mgr) {
LTM_STATUS_S ltm_status;
mgr.get_status(&ltm_status);
......@@ -159,7 +159,7 @@ int main(int argc, const char *argv[]) {
int r;
// create a manager
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, e_callback, nullptr);
mgr.set_max_lock_memory(max_lock_memory);
......
......@@ -126,7 +126,7 @@ static int locktree_write_lock(locktree *lt, TXNID txn_id, int64_t left_k, int64
return lt->acquire_write_lock(txn_id, &left, &right, nullptr, big_txn);
}
static void run_big_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id) {
static void run_big_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id) {
int64_t last_i = -1;
for (int64_t i = 0; !killed; i++) {
uint64_t t_start = toku_current_time_microsec();
......@@ -144,7 +144,7 @@ static void run_big_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id)
locktree_release_lock(lt, txn_id, 0, last_i); // release the range 0 .. last_i
}
static void run_small_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t k) {
static void run_small_txn(locktree_manager *mgr UU(), locktree *lt, TXNID txn_id, int64_t k) {
for (int64_t i = 0; !killed; i++) {
uint64_t t_start = toku_current_time_microsec();
int r = locktree_write_lock(lt, txn_id, k, k, false);
......@@ -160,7 +160,7 @@ static void run_small_txn(locktree::manager *mgr UU(), locktree *lt, TXNID txn_i
}
struct arg {
locktree::manager *mgr;
locktree_manager *mgr;
locktree *lt;
TXNID txn_id;
int64_t k;
......@@ -183,7 +183,7 @@ static void e_callback(TXNID txnid, const locktree *lt, const range_buffer &buff
printf("%u %s %" PRIu64 " %p %d %p\n", toku_os_gettid(), __FUNCTION__, txnid, lt, buffer.get_num_ranges(), extra);
}
static uint64_t get_escalation_count(locktree::manager &mgr) {
static uint64_t get_escalation_count(locktree_manager &mgr) {
LTM_STATUS_S ltm_status;
mgr.get_status(&ltm_status);
......@@ -223,7 +223,7 @@ int main(int argc, const char *argv[]) {
int r;
// create a manager
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, e_callback, nullptr);
mgr.set_max_lock_memory(max_lock_memory);
......
......@@ -95,11 +95,10 @@ namespace toku {
// test that ranges with infinite endpoints work
void locktree_unit_test::test_infinity(void) {
locktree::manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
locktree lt;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
int r;
TXNID txnid_a = 1001;
......@@ -112,60 +111,57 @@ void locktree_unit_test::test_infinity(void) {
const DBT max_int = max_dbt();
// txn A will lock -inf, 5.
r = lt->acquire_write_lock(txnid_a, toku_dbt_negative_infinity(), five, nullptr, false);
r = lt.acquire_write_lock(txnid_a, toku_dbt_negative_infinity(), five, nullptr, false);
invariant(r == 0);
// txn B will fail to get any lock <= 5, even min_int
r = lt->acquire_write_lock(txnid_b, five, five, nullptr, false);
r = lt.acquire_write_lock(txnid_b, five, five, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, zero, one, nullptr, false);
r = lt.acquire_write_lock(txnid_b, zero, one, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, &min_int, &min_int, nullptr, false);
r = lt.acquire_write_lock(txnid_b, &min_int, &min_int, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), &min_int, nullptr, false);
r = lt.acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), &min_int, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
lt->remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), five);
lt.remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), five);
// txn A will lock 1, +inf
r = lt->acquire_write_lock(txnid_a, one, toku_dbt_positive_infinity(), nullptr, false);
r = lt.acquire_write_lock(txnid_a, one, toku_dbt_positive_infinity(), nullptr, false);
invariant(r == 0);
// txn B will fail to get any lock >= 1, even max_int
r = lt->acquire_write_lock(txnid_b, one, one, nullptr, false);
r = lt.acquire_write_lock(txnid_b, one, one, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, two, five, nullptr, false);
r = lt.acquire_write_lock(txnid_b, two, five, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, &max_int, &max_int, nullptr, false);
r = lt.acquire_write_lock(txnid_b, &max_int, &max_int, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, &max_int, toku_dbt_positive_infinity(), nullptr, false);
r = lt.acquire_write_lock(txnid_b, &max_int, toku_dbt_positive_infinity(), nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
lt->remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), five);
lt.remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), five);
// txn A will lock -inf, +inf
r = lt->acquire_write_lock(txnid_a, toku_dbt_negative_infinity(), toku_dbt_positive_infinity(), nullptr, false);
r = lt.acquire_write_lock(txnid_a, toku_dbt_negative_infinity(), toku_dbt_positive_infinity(), nullptr, false);
invariant(r == 0);
// txn B will fail to get any lock
r = lt->acquire_write_lock(txnid_b, zero, one, nullptr, false);
r = lt.acquire_write_lock(txnid_b, zero, one, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, two, five, nullptr, false);
r = lt.acquire_write_lock(txnid_b, two, five, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, &min_int, &min_int, nullptr, false);
r = lt.acquire_write_lock(txnid_b, &min_int, &min_int, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, &min_int, &max_int, nullptr, false);
r = lt.acquire_write_lock(txnid_b, &min_int, &max_int, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, &max_int, &max_int, nullptr, false);
r = lt.acquire_write_lock(txnid_b, &max_int, &max_int, nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), toku_dbt_negative_infinity(), nullptr, false);
r = lt.acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), toku_dbt_negative_infinity(), nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), toku_dbt_positive_infinity(), nullptr, false);
r = lt.acquire_write_lock(txnid_b, toku_dbt_negative_infinity(), toku_dbt_positive_infinity(), nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
r = lt->acquire_write_lock(txnid_b, toku_dbt_positive_infinity(), toku_dbt_positive_infinity(), nullptr, false);
r = lt.acquire_write_lock(txnid_b, toku_dbt_positive_infinity(), toku_dbt_positive_infinity(), nullptr, false);
invariant(r == DB_LOCK_NOTGRANTED);
lt->remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), toku_dbt_positive_infinity());
mgr.release_lt(lt);
mgr.destroy();
lt.remove_overlapping_locks_for_txnid(txnid_a, toku_dbt_negative_infinity(), toku_dbt_positive_infinity());
}
} /* namespace toku */
......
......@@ -107,18 +107,16 @@ static int my_compare_dbts(DB *db, const DBT *a, const DBT *b) {
// test that get/set userdata works, and that get_manager() works
void locktree_unit_test::test_misc(void) {
locktree::manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
locktree lt;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, my_compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, my_compare_dbts);
invariant(lt->get_userdata() == nullptr);
invariant(lt.get_userdata() == nullptr);
int userdata;
lt->set_userdata(&userdata);
invariant(lt->get_userdata() == &userdata);
lt->set_userdata(nullptr);
invariant(lt->get_userdata() == nullptr);
lt.set_userdata(&userdata);
invariant(lt.get_userdata() == &userdata);
lt.set_userdata(nullptr);
invariant(lt.get_userdata() == nullptr);
int r;
DBT dbt_a, dbt_b;
......@@ -128,17 +126,14 @@ void locktree_unit_test::test_misc(void) {
// make sure the comparator object has the correct
// descriptor when we set the locktree's descriptor
lt->set_descriptor(&d1);
lt.set_descriptor(&d1);
expected_descriptor = &d1;
r = lt->m_cmp->compare(&dbt_a, &dbt_b);
r = lt.m_cmp->compare(&dbt_a, &dbt_b);
invariant(r == expected_comparison_magic);
lt->set_descriptor(&d2);
lt.set_descriptor(&d2);
expected_descriptor = &d2;
r = lt->m_cmp->compare(&dbt_a, &dbt_b);
r = lt.m_cmp->compare(&dbt_a, &dbt_b);
invariant(r == expected_comparison_magic);
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -98,11 +98,10 @@ namespace toku {
// write locks if overlapping and ensure that existing read
// or write locks are consolidated by overlapping relocks.
void locktree_unit_test::test_overlapping_relock(void) {
locktree::manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
locktree lt;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
const DBT *zero = get_dbt(0);
const DBT *one = get_dbt(1);
......@@ -121,15 +120,15 @@ void locktree_unit_test::test_overlapping_relock(void) {
// do something. at the end of the test, we release 100, 100.
const TXNID the_other_txnid = 9999;
const DBT *hundred = get_dbt(100);
r = lt->acquire_write_lock(the_other_txnid, hundred, hundred, nullptr, false);
r = lt.acquire_write_lock(the_other_txnid, hundred, hundred, nullptr, false);
invariant(r == 0);
for (int test_run = 0; test_run < 2; test_run++) {
// test_run == 0 means test with read lock
// test_run == 1 means test with write lock
#define ACQUIRE_LOCK(txn, left, right, conflicts) \
test_run == 0 ? lt->acquire_read_lock(txn, left, right, conflicts, false) \
: lt->acquire_write_lock(txn, left, right, conflicts, false)
test_run == 0 ? lt.acquire_read_lock(txn, left, right, conflicts, false) \
: lt.acquire_write_lock(txn, left, right, conflicts, false)
// lock [1,1] and [2,2]. then lock [1,2].
// ensure only [1,2] exists in the tree
......@@ -157,10 +156,10 @@ void locktree_unit_test::test_overlapping_relock(void) {
return true;
}
} verify_fn;
verify_fn.cmp = lt->m_cmp;
verify_fn.cmp = lt.m_cmp;
#define do_verify() \
do { verify_fn.saw_the_other = false; locktree_iterate<verify_fn_obj>(lt, &verify_fn); } while (0)
do { verify_fn.saw_the_other = false; locktree_iterate<verify_fn_obj>(&lt, &verify_fn); } while (0)
keyrange range;
range.create(one, two);
......@@ -170,9 +169,9 @@ void locktree_unit_test::test_overlapping_relock(void) {
// unlocking [1,1] should remove the only range,
// the other unlocks should do nothing.
lt->remove_overlapping_locks_for_txnid(txnid_a, one, one);
lt->remove_overlapping_locks_for_txnid(txnid_a, two, two);
lt->remove_overlapping_locks_for_txnid(txnid_a, one, two);
lt.remove_overlapping_locks_for_txnid(txnid_a, one, one);
lt.remove_overlapping_locks_for_txnid(txnid_a, two, two);
lt.remove_overlapping_locks_for_txnid(txnid_a, one, two);
// try overlapping from the right
r = ACQUIRE_LOCK(txnid_a, one, three, nullptr);
......@@ -197,16 +196,13 @@ void locktree_unit_test::test_overlapping_relock(void) {
do_verify();
// release one of the locks we acquired. this should clean up the whole range.
lt->remove_overlapping_locks_for_txnid(txnid_a, zero, four);
lt.remove_overlapping_locks_for_txnid(txnid_a, zero, four);
#undef ACQUIRE_LOCK
}
// remove the other txnid's lock now
lt->remove_overlapping_locks_for_txnid(the_other_txnid, hundred, hundred);
mgr.release_lt(lt);
mgr.destroy();
lt.remove_overlapping_locks_for_txnid(the_other_txnid, hundred, hundred);
}
} /* namespace toku */
......
......@@ -95,11 +95,11 @@ namespace toku {
// test simple, non-overlapping read locks and then write locks
void locktree_unit_test::test_simple_lock(void) {
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr);
locktree *lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr);
int r;
TXNID txnid_a = 1001;
......
......@@ -98,11 +98,10 @@ namespace toku {
// write locks if overlapping and ensure that existing read
// or write locks are consolidated by overlapping relocks.
void locktree_unit_test::test_single_txnid_optimization(void) {
locktree::manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DESCRIPTOR desc = nullptr;
locktree lt;
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr);
lt.create(nullptr, dict_id, nullptr, compare_dbts);
const DBT *zero = get_dbt(0);
const DBT *one = get_dbt(1);
......@@ -124,13 +123,13 @@ void locktree_unit_test::test_single_txnid_optimization(void) {
buffer.create();
#define lock_and_append_point_for_txnid_a(key) \
r = lt->acquire_write_lock(txnid_a, key, key, nullptr, false); \
r = lt.acquire_write_lock(txnid_a, key, key, nullptr, false); \
invariant_zero(r); \
buffer.append(key, key);
#define maybe_point_locks_for_txnid_b(i) \
if (where == i) { \
r = lt->acquire_write_lock(txnid_b, one, one, nullptr, false); \
r = lt.acquire_write_lock(txnid_b, one, one, nullptr, false); \
invariant_zero(r); \
}
......@@ -143,7 +142,7 @@ void locktree_unit_test::test_single_txnid_optimization(void) {
lock_and_append_point_for_txnid_a(zero);
maybe_point_locks_for_txnid_b(2);
lt->release_locks(txnid_a, &buffer);
lt.release_locks(txnid_a, &buffer);
// txnid b does not take a lock on iteration 3
if (where != 3) {
......@@ -158,21 +157,18 @@ void locktree_unit_test::test_single_txnid_optimization(void) {
return true;
}
} verify_fn;
verify_fn.cmp = lt->m_cmp;
verify_fn.cmp = lt.m_cmp;
keyrange range;
range.create(one, one);
verify_fn.expected_txnid = txnid_b;
verify_fn.expected_range = &range;
locktree_iterate<verify_fn_obj>(lt, &verify_fn);
lt->remove_overlapping_locks_for_txnid(txnid_b, one, one);
locktree_iterate<verify_fn_obj>(&lt, &verify_fn);
lt.remove_overlapping_locks_for_txnid(txnid_b, one, one);
}
buffer.destroy();
}
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
......
......@@ -94,17 +94,14 @@ PATENT RIGHTS GRANT:
namespace toku {
void manager_unit_test::test_create_destroy(void) {
locktree::manager mgr;
locktree::manager::lt_create_cb create_callback =
(locktree::manager::lt_create_cb) (long) 1;
locktree::manager::lt_destroy_cb destroy_callback =
(locktree::manager::lt_destroy_cb) (long) 2;
locktree::manager::lt_escalate_cb escalate_callback =
(locktree::manager::lt_escalate_cb) (long) 3;
locktree_manager mgr;
lt_create_cb create_callback = (lt_create_cb) (long) 1;
lt_destroy_cb destroy_callback = (lt_destroy_cb) (long) 2;
lt_escalate_cb escalate_callback = (lt_escalate_cb) (long) 3;
void *extra = (void *) (long) 4;
mgr.create(create_callback, destroy_callback, escalate_callback, extra);
invariant(mgr.m_max_lock_memory == locktree::manager::DEFAULT_MAX_LOCK_MEMORY);
invariant(mgr.m_max_lock_memory == locktree_manager::DEFAULT_MAX_LOCK_MEMORY);
invariant(mgr.m_current_lock_memory == 0);
invariant(mgr.m_escalation_count == 0);
invariant(mgr.m_escalation_time == 0);
......
......@@ -94,7 +94,7 @@ PATENT RIGHTS GRANT:
namespace toku {
void manager_unit_test::test_lt_map(void) {
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
locktree aa;
......
......@@ -95,7 +95,7 @@ namespace toku {
void manager_unit_test::test_params(void) {
int r;
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
uint64_t new_max_lock_memory = 15307752356;
......
......@@ -108,7 +108,7 @@ static void destroy_cb(locktree *lt) {
}
void manager_unit_test::test_reference_release_lt(void) {
locktree::manager mgr;
locktree_manager mgr;
mgr.create(create_cb, destroy_cb, nullptr, nullptr);
DICTIONARY_ID a = { 0 };
......
......@@ -112,8 +112,7 @@ static void assert_status(LTM_STATUS ltm_status, const char *keyname, uint64_t v
}
void manager_unit_test::test_status(void) {
locktree::manager mgr;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
LTM_STATUS_S status;
......
......@@ -150,7 +150,7 @@ struct __toku_db_env_internal {
unsigned long cachetable_size;
CACHETABLE cachetable;
TOKULOGGER logger;
toku::locktree::manager ltm;
toku::locktree_manager ltm;
lock_timeout_callback lock_wait_timeout_callback; // Called when a lock request times out waiting for a lock.
DB *directory; // Maps dnames to inames
......@@ -189,7 +189,7 @@ struct __toku_db_env_internal {
// test-only environment function for running lock escalation
static inline void toku_env_run_lock_escalation_for_test(DB_ENV *env) {
toku::locktree::manager *mgr = &env->i->ltm;
toku::locktree_manager *mgr = &env->i->ltm;
mgr->run_escalation_for_test();
}
......
......@@ -2348,7 +2348,7 @@ env_iterate_pending_lock_requests(DB_ENV *env,
return EINVAL;
}
toku::locktree::manager *mgr = &env->i->ltm;
toku::locktree_manager *mgr = &env->i->ltm;
ltm_iterate_requests_callback_extra e(env, callback, extra);
return mgr->iterate_pending_lock_requests(ltm_iterate_requests_callback, &e);
}
......
......@@ -137,7 +137,7 @@ static void db_txn_note_row_lock(DB *db, DB_TXN *txn, const DBT *left_key, const
map->insert_at(ranges, idx);
// let the manager know we're referencing this lt
toku::locktree::manager *ltm = &txn->mgrp->i->ltm;
toku::locktree_manager *ltm = &txn->mgrp->i->ltm;
ltm->reference_lt(ranges.lt);
} else {
invariant_zero(r);
......@@ -148,7 +148,7 @@ static void db_txn_note_row_lock(DB *db, DB_TXN *txn, const DBT *left_key, const
ranges.buffer->append(left_key, right_key);
size_t new_num_bytes = ranges.buffer->get_num_bytes();
invariant(new_num_bytes > old_num_bytes);
lt->get_mem_tracker()->note_mem_used(new_num_bytes - old_num_bytes);
lt->get_manager()->note_mem_used(new_num_bytes - old_num_bytes);
toku_mutex_unlock(&db_txn_struct_i(txn)->txn_mutex);
}
......@@ -201,7 +201,7 @@ void toku_db_txn_escalate_callback(TXNID txnid, const toku::locktree *lt, const
//
// We could theoretically steal the memory from the caller instead of copying
// it, but it's simpler to have a callback API that doesn't transfer memory ownership.
lt->get_mem_tracker()->note_mem_released(ranges.buffer->get_num_bytes());
lt->get_manager()->note_mem_released(ranges.buffer->get_num_bytes());
ranges.buffer->destroy();
ranges.buffer->create();
toku::range_buffer::iterator iter;
......@@ -211,7 +211,7 @@ void toku_db_txn_escalate_callback(TXNID txnid, const toku::locktree *lt, const
ranges.buffer->append(rec.get_left_key(), rec.get_right_key());
iter.next();
}
lt->get_mem_tracker()->note_mem_used(ranges.buffer->get_num_bytes());
lt->get_manager()->note_mem_used(ranges.buffer->get_num_bytes());
} else {
// In rare cases, we may not find the associated locktree, because we are
// racing with the transaction trying to add this locktree to the lt map
......@@ -315,7 +315,7 @@ void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges) {
// release all of the locks this txn has ever successfully
// acquired and stored in the range buffer for this locktree
lt->release_locks(txnid, ranges->buffer);
lt->get_mem_tracker()->note_mem_released(ranges->buffer->get_num_bytes());
lt->get_manager()->note_mem_released(ranges->buffer->get_num_bytes());
ranges->buffer->destroy();
toku_free(ranges->buffer);
......@@ -324,6 +324,6 @@ void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges) {
toku::lock_request::retry_all_lock_requests(lt);
// Release our reference on this locktree
toku::locktree::manager *ltm = &txn->mgrp->i->ltm;
toku::locktree_manager *ltm = &txn->mgrp->i->ltm;
ltm->release_lt(lt);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment