Commit 85da482b authored by Rich Prohaska, committed by Yoni Fogel

#3520 increase test coverage of the lock and range trees refs[t:3520]

git-svn-id: file:///svn/toku/tokudb@42973 c7de825b-a66e-492c-adef-691d508d4ae1
parent 80359de8
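
Note on the pattern in this diff: allocation and helper-creation failures that used to be reported back to the caller as ENOMEM/EINVAL are now treated as fatal, using the xmalloc/xrealloc wrappers plus asserts, which removes error paths the tests could never reach. The snippet below is a minimal, hypothetical sketch of that idea for illustration only; the names example_xmalloc, example_mgr, and example_create are made up here and are not the actual toku_xmalloc or lock manager code.

#include <assert.h>
#include <stdlib.h>

/* Hypothetical stand-in for an xmalloc-style wrapper: abort on allocation
 * failure instead of returning NULL, so callers need no ENOMEM cleanup. */
static void *example_xmalloc(size_t size) {
    void *p = malloc(size);
    assert(p != NULL);   /* allocation failure is treated as fatal */
    return p;
}

/* Before: every allocation carried a recoverable error path, e.g.
 *     mgr = (toku_ltm *) toku_malloc(sizeof *mgr);
 *     if (!mgr) { r = ENOMEM; goto cleanup; }
 * After: the allocation either succeeds or the process asserts. */
struct example_mgr { int dummy; };

static struct example_mgr *example_create(void) {
    struct example_mgr *mgr = (struct example_mgr *) example_xmalloc(sizeof *mgr);
    mgr->dummy = 0;
    return mgr;
}

int main(void) {
    struct example_mgr *mgr = example_create();
    free(mgr);
    return 0;
}
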
@@ -240,15 +240,11 @@ toku_ltm_create(toku_ltm** pmgr,
     int r = ENOSYS;
     toku_ltm* mgr = NULL;
-    if (!pmgr || !locks_limit) {
+    if (!pmgr || !locks_limit || panic == NULL) {
         r = EINVAL; goto cleanup;
     }
-    assert(panic);
-    mgr = (toku_ltm*) toku_malloc(sizeof(*mgr));
-    if (!mgr) {
-        r = ENOMEM; goto cleanup;
-    }
+    mgr = (toku_ltm*) toku_xmalloc(sizeof(*mgr));
     memset(mgr, 0, sizeof(toku_ltm));
     r = toku_ltm_set_max_locks(mgr, locks_limit);
@@ -260,18 +256,11 @@ toku_ltm_create(toku_ltm** pmgr,
     mgr->panic = panic;
     r = toku_lth_create(&mgr->lth);
-    if (r != 0)
-        goto cleanup;
-    if (!mgr->lth) {
-        r = ENOMEM; goto cleanup;
-    }
+    assert(r == 0 && mgr->lth);
     r = toku_idlth_create(&mgr->idlth);
-    if (r != 0)
-        goto cleanup;
-    if (!mgr->idlth) {
-        r = ENOMEM; goto cleanup;
-    }
+    assert(r == 0 && mgr->idlth);
     toku_mutex_init(&mgr->mutex, NULL);
     mgr->mutex_locked = false;
     DRD_IGNORE_VAR(mgr->status);
@@ -280,10 +269,8 @@ toku_ltm_create(toku_ltm** pmgr,
 cleanup:
     if (r != 0) {
         if (mgr) {
-            if (mgr->lth)
-                toku_lth_close(mgr->lth);
-            if (mgr->idlth)
-                toku_idlth_close(mgr->idlth);
+            assert(mgr->lth == NULL);
+            assert(mgr->idlth == NULL);
             toku_free(mgr);
         }
     }
@@ -293,23 +280,17 @@ cleanup:
 // For now, ltm_open does nothing.
 int
 toku_ltm_open(toku_ltm *mgr) {
-    int r;
-    if (!mgr)
-        r = EINVAL;
-    else
-        r = 0;
-    return r;
+    assert(mgr);
+    return 0;
 }
 int
 toku_ltm_close(toku_ltm* mgr) {
+    assert(mgr);
     int r = ENOSYS;
     int first_error = 0;
-    if (!mgr) {
-        r = EINVAL; goto cleanup;
-    }
     toku_lth_start_scan(mgr->lth);
     toku_lock_tree* lt;
     while ((lt = toku_lth_next(mgr->lth)) != NULL) {
@@ -324,9 +305,7 @@ toku_ltm_close(toku_ltm* mgr) {
     assert(mgr->curr_locks == 0 && mgr->curr_lock_memory == 0);
     toku_free(mgr);
-    r = first_error;
-cleanup:
-    return r;
+    return first_error;
 }
 int
@@ -1356,36 +1335,26 @@ toku_lt_create(toku_lock_tree** ptree,
         r = EINVAL; goto cleanup;
     }
-    tmp_tree = (toku_lock_tree*)toku_malloc(sizeof(*tmp_tree));
-    if (!tmp_tree) { r = ENOMEM; goto cleanup; }
+    tmp_tree = (toku_lock_tree*)toku_xmalloc(sizeof(*tmp_tree));
     memset(tmp_tree, 0, sizeof(toku_lock_tree));
     tmp_tree->mgr = mgr;
     tmp_tree->compare_fun = compare_fun;
     tmp_tree->lock_escalation_allowed = TRUE;
     r = toku_rt_create(&tmp_tree->borderwrite, toku_lt_point_cmp, lt_txn_cmp, FALSE,
                        ltm_incr_lock_memory_callback, ltm_decr_lock_memory_callback, mgr);
-    if (r != 0)
-        goto cleanup;
+    assert(r == 0);
     r = toku_rth_create(&tmp_tree->rth);
-    if (r != 0)
-        goto cleanup;
+    assert(r == 0);
     r = toku_rth_create(&tmp_tree->txns_to_unlock);
-    if (r != 0)
-        goto cleanup;
+    assert(r == 0);
     tmp_tree->buflen = __toku_default_buflen;
-    tmp_tree->buf = (toku_range*)
-        toku_malloc(tmp_tree->buflen * sizeof(toku_range));
-    if (!tmp_tree->buf) { r = ENOMEM; goto cleanup; }
+    tmp_tree->buf = (toku_range*) toku_xmalloc(tmp_tree->buflen * sizeof(toku_range));
     tmp_tree->bw_buflen = __toku_default_buflen;
-    tmp_tree->bw_buf = (toku_range*)
-        toku_malloc(tmp_tree->bw_buflen * sizeof(toku_range));
-    if (!tmp_tree->bw_buf) { r = ENOMEM; goto cleanup; }
+    tmp_tree->bw_buf = (toku_range*) toku_xmalloc(tmp_tree->bw_buflen * sizeof(toku_range));
     tmp_tree->verify_buflen = 0;
     tmp_tree->verify_buf = NULL;
     r = toku_omt_create(&tmp_tree->dbs);
-    if (r != 0)
-        goto cleanup;
+    assert(r == 0);
     lock_request_tree_init(tmp_tree);
     toku_mutex_init(&tmp_tree->mutex, NULL);
     tmp_tree->mutex_locked = false;
@@ -1395,18 +1364,6 @@ toku_lt_create(toku_lock_tree** ptree,
 cleanup:
     if (r != 0) {
         if (tmp_tree) {
-            if (tmp_tree->borderwrite)
-                toku_rt_close(tmp_tree->borderwrite);
-            if (tmp_tree->rth)
-                toku_rth_close(tmp_tree->rth);
-            if (tmp_tree->txns_to_unlock)
-                toku_rth_close(tmp_tree->txns_to_unlock);
-            if (tmp_tree->buf)
-                toku_free(tmp_tree->buf);
-            if (tmp_tree->bw_buf)
-                toku_free(tmp_tree->bw_buf);
-            if (tmp_tree->dbs)
-                toku_omt_destroy(&tmp_tree->dbs);
             toku_free(tmp_tree);
         }
     }
@@ -1556,7 +1513,7 @@ cleanup:
 static int
 lt_try_acquire_range_read_lock(toku_lock_tree* tree, DB* db, TXNID txn, const DBT* key_left, const DBT* key_right) {
-    assert(tree->mutex_locked); // locked by this thread
+    assert(tree && tree->mutex_locked); // locked by this thread
     int r;
     toku_point left;
@@ -1807,8 +1764,8 @@ cleanup:
 // run escalation algorithm on a given locktree
 static int
 lt_do_escalation(toku_lock_tree* lt) {
-    assert(lt);
-    assert(lt->mutex_locked);
+    assert(lt && lt->mutex_locked);
     int r = ENOSYS;
     DB* db;  // extract db from lt
     OMTVALUE dbv;
@@ -1846,21 +1803,17 @@ cleanup:
 static int
 ltm_do_escalation(toku_ltm* mgr) {
     assert(mgr);
-    int r = ENOSYS;
+    int r = 0;
+    toku_lock_tree* lt = NULL;
     ltm_mutex_lock(mgr);
     toku_lth_start_scan(mgr->lth);  // initialize iterator in mgr
-    toku_lock_tree* lt;
     while ((lt = toku_lth_next(mgr->lth)) != NULL) {
         lt_mutex_lock(lt);
         r = lt_do_escalation(lt);
         lt_mutex_unlock(lt);
         if (r != 0)
-            goto cleanup;
+            break;
     }
-    r = 0;
-cleanup:
     ltm_mutex_unlock(mgr);
     return r;
 }
@@ -2163,10 +2116,8 @@ lt_border_delete(toku_lock_tree* tree, toku_range_tree* rt) {
 static inline int
 lt_unlock_txn(toku_lock_tree* tree, TXNID txn) {
-    assert(tree->mutex_locked);
-    if (!tree)
-        return EINVAL;
+    assert(tree && tree->mutex_locked);
     int r;
     toku_range_tree *selfwrite = toku_lt_ifexist_selfwrite(tree, txn);
     toku_range_tree *selfread  = toku_lt_ifexist_selfread (tree, txn);
@@ -2450,7 +2401,7 @@ toku_lock_request_wait(toku_lock_request *lock_request, toku_lock_tree *tree, st
 int
 toku_lock_request_wait_with_default_timeout(toku_lock_request *lock_request, toku_lock_tree *tree) {
-    return lock_request_wait(lock_request, tree, &tree->mgr->lock_wait_time, false);
+    return toku_lock_request_wait(lock_request, tree, &tree->mgr->lock_wait_time);
 }
 static void
@@ -2559,7 +2510,7 @@ static void lt_check_deadlock(toku_lock_tree *tree, toku_lock_request *a_lock_re
 static int
 lock_request_start(toku_lock_request *lock_request, toku_lock_tree *tree, bool copy_keys_if_not_granted, bool do_escalation) {
     assert(lock_request->state == LOCK_REQUEST_INIT);
-    assert(tree->mutex_locked);
+    assert(tree && tree->mutex_locked);
     int r = 0;
     switch (lock_request->type) {
     case LOCK_REQUEST_READ:
@@ -2632,7 +2583,7 @@ toku_lt_acquire_lock_request_with_default_timeout(toku_lock_tree *tree, toku_loc
 static void
 lt_retry_lock_requests(toku_lock_tree *tree) {
-    assert(tree->mutex_locked);
+    assert(tree && tree->mutex_locked);
     for (uint32_t i = 0; i < toku_omt_size(tree->lock_requests); ) {
         int r;
......
@@ -9,6 +9,8 @@ int main(void) {
     r = toku_ltm_create(&mgr, max_locks, max_lock_memory, dbpanic);
     CKERR(r);
+    r = toku_ltm_open(mgr);
+    CKERR(r);
     {
         r = toku_lt_create(&lt, mgr, dbcmp);
......
@@ -10,7 +10,14 @@ static int write_lock(toku_lock_tree *lt, TXNID txnid, char *k) {
     DBT key; dbt_init(&key, k, strlen(k));
     toku_lock_request lr;
     toku_lock_request_init(&lr, (DB*)1, txnid, &key, &key, LOCK_REQUEST_WRITE);
-    int r = toku_lt_acquire_lock_request_with_timeout(lt, &lr, NULL);
+    int r;
+    if (0) {
+        r = toku_lt_acquire_lock_request_with_timeout(lt, &lr, NULL);
+    } else {
+        r = toku_lock_request_start(&lr, lt, true);
+        if (r == DB_LOCK_NOTGRANTED)
+            r = toku_lock_request_wait_with_default_timeout(&lr, lt);
+    }
     toku_lock_request_destroy(&lr);
     return r;
 }
@@ -67,6 +74,7 @@ int main(int argc, const char *argv[]) {
     toku_ltm *ltm = NULL;
     r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
     assert(r == 0 && ltm);
+    toku_ltm_set_lock_wait_time(ltm, UINT64_MAX);
     DB *fake_db = (DB *) 1;
......
@@ -41,9 +41,7 @@ toku__rt_decrease_capacity(toku_range_tree* tree, u_int32_t _num) {
         temp_len /= 2;
     assert(temp_len >= _num); //Sanity check.
     size_t start_size = toku_rt_memory_size(tree);
-    toku_range* temp_ranges = toku_realloc(tree->i.ranges, temp_len * sizeof(toku_range));
-    if (!temp_ranges)
-        return errno;
+    toku_range* temp_ranges = toku_xrealloc(tree->i.ranges, temp_len * sizeof(toku_range));
     tree->i.ranges = temp_ranges;
     tree->i.ranges_len = temp_len;
     size_t end_size = toku_rt_memory_size(tree);
@@ -63,9 +61,7 @@ toku__rt_increase_capacity(toku_range_tree* tree, u_int32_t num) {
     while (temp_len < num)
         temp_len *= 2;
     size_t start_size = toku_rt_memory_size(tree);
-    toku_range* temp_ranges = toku_realloc(tree->i.ranges, temp_len * sizeof(toku_range));
-    if (!temp_ranges)
-        return errno;
+    toku_range* temp_ranges = toku_xrealloc(tree->i.ranges, temp_len * sizeof(toku_range));
     tree->i.ranges = temp_ranges;
     tree->i.ranges_len = temp_len;
     size_t end_size = toku_rt_memory_size(tree);
@@ -133,21 +129,13 @@ toku_rt_create(toku_range_tree** ptree,
     if (!ptree)
         return EINVAL;
     r = toku_rt_super_create(ptree, &tmptree, end_cmp, data_cmp, allow_overlaps, incr_memory_size, decr_memory_size, extra_memory_size);
-    if (0) {
-died1:
-        toku_free(tmptree);
-        return r;
-    }
     if (r != 0)
         return r;
     //Any local initializers go here.
     tmptree->i.numelements = 0;
     tmptree->i.ranges_len = minlen;
-    tmptree->i.ranges = (toku_range*) toku_malloc(tmptree->i.ranges_len * sizeof(toku_range));
-    if (!tmptree->i.ranges) {
-        r = errno; goto died1;
-    }
+    tmptree->i.ranges = (toku_range*) toku_xmalloc(tmptree->i.ranges_len * sizeof(toku_range));
     tmptree->incr_memory_size(tmptree->extra_memory_size, toku_rt_memory_size(tmptree));
     *ptree = tmptree;
@@ -184,8 +172,7 @@ toku_rt_find(toku_range_tree* tree, toku_interval* query, u_int32_t k,
     for (u_int32_t i = 0; i < tree->i.numelements; i++) {
         if (toku__rt_overlap(tree, query, &tree->i.ranges[i].ends)) {
             r = toku__rt_increase_buffer(tree, buf, buflen, temp_numfound + 1);
-            if (r != 0)
-                return r;
+            assert_zero(r);
             (*buf)[temp_numfound++] = tree->i.ranges[i];
             //k == 0 means limit of infinity, this is not a bug.
             if (temp_numfound == k)
@@ -222,8 +209,8 @@ toku_rt_insert(toku_range_tree* tree, toku_range* range) {
     }
     /* Goes in slot 'i' */
     r = toku__rt_increase_capacity(tree, tree->i.numelements + 1);
-    if (r != 0)
-        return r;
+    assert_zero(r);
     tree->i.numelements++;
     /* Shift to make room. */
     for (u_int32_t move = tree->i.numelements - 1; move > i; move--) {
......
@@ -40,24 +40,25 @@ toku_rt_create(toku_range_tree** ptree,
     int r = ENOSYS;
     toku_range_tree* tmptree = NULL;
-    if (allow_overlaps)
-        return EINVAL;
+    if (allow_overlaps) {
+        r = EINVAL;
+        goto cleanup;
+    }
     r = toku_rt_super_create(ptree, &tmptree, end_cmp, data_cmp, allow_overlaps, incr_memory_size, decr_memory_size, extra_memory_size);
     if (r != 0)
         goto cleanup;
     //Any local initializers go here.
     r = toku_omt_create(&tmptree->i.omt);
-    if (r != 0)
-        goto cleanup;
+    assert_zero(r);
     tmptree->incr_memory_size(tmptree->extra_memory_size, toku_rt_memory_size(tmptree));
     *ptree = tmptree;
     r = 0;
 cleanup:
     if (r != 0) {
-        if (tmptree)
-            toku_free(tmptree);
+        assert(tmptree == NULL);
     }
     return r;
 }
@@ -185,10 +186,8 @@ toku_rt_find(toku_range_tree* tree, toku_interval* query, u_int32_t k,
     r = toku_omt_iterate_on_range(tree->i.omt, leftmost, rightmost, rt_find_helper, &info);
     if (r == TOKUDB_SUCCEEDED_EARLY)
         r = 0;
-    if (r != 0)
-        goto cleanup;
-    *numfound = info.numfound;
-    r = 0;
+    if (r == 0)
+        *numfound = info.numfound;
 cleanup:
     return r;
 }
@@ -226,8 +225,7 @@ toku_rt_insert(toku_range_tree* tree, toku_range* range) {
     r = 0;
 cleanup:
     if (r != 0) {
-        if (insert_range)
-            toku_free(insert_range);
+        toku_free(insert_range);
     }
     return r;
 }
......
@@ -58,9 +58,7 @@ static inline int toku__rt_increase_buffer(toku_range_tree* tree UU(), toku_rang
         temp_len = 1;
     while (temp_len < num)
         temp_len *= 2;
-    toku_range* temp_buf = toku_realloc(*buf, temp_len * sizeof(toku_range));
-    if (!temp_buf)
-        return errno;
+    toku_range* temp_buf = toku_xrealloc(*buf, temp_len * sizeof(toku_range));
     *buf = temp_buf;
     *buflen = temp_len;
 }
@@ -77,13 +75,10 @@ toku_rt_super_create(toku_range_tree** upperptree,
                      void (*decr_memory_size)(void *extra_memory_size, size_t s),
                      void *extra_memory_size) {
-    toku_range_tree* temptree;
     if (!upperptree || !ptree || !end_cmp || !data_cmp)
         return EINVAL;
-    temptree = (toku_range_tree*) toku_malloc(sizeof(toku_range_tree));
-    if (!temptree)
-        return ENOMEM;
+    toku_range_tree* temptree = (toku_range_tree*) toku_xmalloc(sizeof(toku_range_tree));
     //Any initializers go here.
     temptree->end_cmp = end_cmp;
......
@@ -169,6 +169,11 @@ int main(int argc, const char *argv[]) {
     r = toku_rt_close(tree);   CKERR(r);
     tree = NULL;
+#ifdef TOKU_RT_NOOVERLAPS
+    r = toku_rt_create(&tree, int_cmp, TXNID_cmp, true, test_incr_memory_size, test_decr_memory_size, NULL);
+    CKERR2(r, EINVAL);
+#endif
     /* That's it: clean up and go home */
     toku_free(buf);
     buf = NULL;
......