Commit 0c8e8467 authored by John Esmet's avatar John Esmet Committed by Yoni Fogel

[t:4838] the locktree now opens its own ft handle when created to ensure that...

[t:4838] The locktree now opens its own ft handle when created, to ensure that the ft stays in memory as long as the lock tree is in memory. We use on_create and on_destroy callbacks to keep the txn/ft logic out of the locktree. The API for toku_ltm_get_lt is a little hairy now; a better solution may come in the future. For now, the original failing test passes.


git-svn-id: file:///svn/toku/tokudb@44819 c7de825b-a66e-492c-adef-691d508d4ae1
parent e4ef93b4
...@@ -3314,6 +3314,34 @@ toku_ft_handle_open(FT_HANDLE t, const char *fname_in_env, int is_create, int on ...@@ -3314,6 +3314,34 @@ toku_ft_handle_open(FT_HANDLE t, const char *fname_in_env, int is_create, int on
return r; return r;
} }
// clone an ft handle. the cloned handle has a new dict_id but refers to the same fractal tree
int
toku_ft_handle_clone(FT_HANDLE *cloned_ft_handle, FT_HANDLE ft_handle, TOKUTXN txn) {
    // the source handle must refer to an open ft backed by an open
    // cachefile, otherwise there is nothing to clone.
    invariant(ft_handle->ft);
    invariant(ft_handle->ft->cf);

    FT_HANDLE new_handle;
    int r = toku_ft_handle_create(&new_handle);
    invariant_zero(r);

    // carry over the options of the ft whose handle is being cloned.
    toku_ft_handle_inherit_options(new_handle, ft_handle->ft);

    // opening a fresh handle against the same fname yields the clone:
    // a new dict_id over the same underlying fractal tree.
    CACHEFILE cf = ft_handle->ft->cf;
    r = toku_ft_handle_open(new_handle, toku_cachefile_fname_in_env(cf), false, false,
                            toku_cachefile_get_cachetable(cf), txn);
    if (r != 0) {
        toku_ft_handle_close(new_handle);
        new_handle = NULL;
    }
    // on failure the caller gets NULL and the open's error code
    *cloned_ft_handle = new_handle;
    return r;
}
// Open a brt in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function. // Open a brt in normal use. The FILENUM and dict_id are assigned by the ft_handle_open() function.
int int
toku_ft_handle_open_with_dict_id( toku_ft_handle_open_with_dict_id(
......
...@@ -99,6 +99,9 @@ int toku_ft_handle_open(FT_HANDLE, const char *fname_in_env, ...@@ -99,6 +99,9 @@ int toku_ft_handle_open(FT_HANDLE, const char *fname_in_env,
int toku_ft_handle_open_recovery(FT_HANDLE, const char *fname_in_env, int is_create, int only_create, CACHETABLE ct, TOKUTXN txn, int toku_ft_handle_open_recovery(FT_HANDLE, const char *fname_in_env, int is_create, int only_create, CACHETABLE ct, TOKUTXN txn,
FILENUM use_filenum, LSN max_acceptable_lsn) __attribute__ ((warn_unused_result)); FILENUM use_filenum, LSN max_acceptable_lsn) __attribute__ ((warn_unused_result));
// clone an ft handle. the cloned handle has a new dict_id but refers to the same fractal tree
int toku_ft_handle_clone(FT_HANDLE *cloned_ft_handle, FT_HANDLE ft_handle, TOKUTXN txn);
// close an ft handle during normal operation. the underlying ft may or may not close, // close an ft handle during normal operation. the underlying ft may or may not close,
// depending if there are still references. an lsn for this close will come from the logger. // depending if there are still references. an lsn for this close will come from the logger.
void toku_ft_handle_close(FT_HANDLE ft_handle); void toku_ft_handle_close(FT_HANDLE ft_handle);
......
...@@ -81,6 +81,11 @@ struct __toku_lock_tree { ...@@ -81,6 +81,11 @@ struct __toku_lock_tree {
uint32_t bw_buflen; uint32_t bw_buflen;
toku_range* verify_buf; toku_range* verify_buf;
uint32_t verify_buflen; uint32_t verify_buflen;
// reserved for the lock tree user's data
void *userdata;
// first thing called when toku_lt_close() happens
toku_lt_on_close_cb on_close_callback;
}; };
toku_range_tree* toku_lt_ifexist_selfread(toku_lock_tree* tree, TXNID txn); toku_range_tree* toku_lt_ifexist_selfread(toku_lock_tree* tree, TXNID txn);
......
...@@ -1283,8 +1283,9 @@ toku_lt_create(toku_lock_tree** ptree, ...@@ -1283,8 +1283,9 @@ toku_lt_create(toku_lock_tree** ptree,
r = EINVAL; goto cleanup; r = EINVAL; goto cleanup;
} }
tmp_tree = (toku_lock_tree*)toku_xmalloc(sizeof(*tmp_tree)); // allocate a tree, initialized to zeroes
memset(tmp_tree, 0, sizeof(toku_lock_tree)); tmp_tree = toku_xmalloc(sizeof(*tmp_tree));
memset(tmp_tree, 0, sizeof(*tmp_tree));
tmp_tree->mgr = mgr; tmp_tree->mgr = mgr;
tmp_tree->compare_fun = compare_fun; tmp_tree->compare_fun = compare_fun;
tmp_tree->lock_escalation_allowed = TRUE; tmp_tree->lock_escalation_allowed = TRUE;
...@@ -1297,9 +1298,6 @@ toku_lt_create(toku_lock_tree** ptree, ...@@ -1297,9 +1298,6 @@ toku_lt_create(toku_lock_tree** ptree,
tmp_tree->buf = (toku_range*) toku_xmalloc(tmp_tree->buflen * sizeof(toku_range)); tmp_tree->buf = (toku_range*) toku_xmalloc(tmp_tree->buflen * sizeof(toku_range));
tmp_tree->bw_buflen = __toku_default_buflen; tmp_tree->bw_buflen = __toku_default_buflen;
tmp_tree->bw_buf = (toku_range*) toku_xmalloc(tmp_tree->bw_buflen * sizeof(toku_range)); tmp_tree->bw_buf = (toku_range*) toku_xmalloc(tmp_tree->bw_buflen * sizeof(toku_range));
tmp_tree->verify_buflen = 0;
tmp_tree->verify_buf = NULL;
assert(r == 0);
lock_request_tree_init(tmp_tree); lock_request_tree_init(tmp_tree);
toku_mutex_init(&tmp_tree->mutex, NULL); toku_mutex_init(&tmp_tree->mutex, NULL);
tmp_tree->ref_count = 1; tmp_tree->ref_count = 1;
...@@ -1345,7 +1343,9 @@ toku_lt_update_descriptor(toku_lock_tree* tree, DESCRIPTOR desc) { ...@@ -1345,7 +1343,9 @@ toku_lt_update_descriptor(toku_lock_tree* tree, DESCRIPTOR desc) {
} }
int int
toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree, DICTIONARY_ID dict_id, DESCRIPTOR desc, toku_dbt_cmp compare_fun) { toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree, DICTIONARY_ID dict_id, DESCRIPTOR desc,
toku_dbt_cmp compare_fun, toku_lt_on_create_cb on_create_callback, void *on_create_extra,
toku_lt_on_close_cb on_close_callback) {
/* first look in hash table to see if lock tree exists for that db, /* first look in hash table to see if lock tree exists for that db,
if so return it */ if so return it */
int r = ENOSYS; int r = ENOSYS;
...@@ -1370,6 +1370,12 @@ toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree, DICTIONARY_ID dict_id, DE ...@@ -1370,6 +1370,12 @@ toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree, DICTIONARY_ID dict_id, DE
if (r != 0) { if (r != 0) {
goto cleanup; goto cleanup;
} }
// we just created the locktree, so call the callback if necessary
if (on_create_callback) {
on_create_callback(tree, on_create_extra);
}
// set the on close callback
tree->on_close_callback = on_close_callback;
lt_set_dict_id(tree, dict_id); lt_set_dict_id(tree, dict_id);
/* add tree to ltm */ /* add tree to ltm */
...@@ -1412,6 +1418,10 @@ cleanup: ...@@ -1412,6 +1418,10 @@ cleanup:
toku_lt_close(tree); toku_lt_close(tree);
} }
mgr->STATUS_VALUE(LTM_LT_CREATE_FAIL)++; mgr->STATUS_VALUE(LTM_LT_CREATE_FAIL)++;
// need to make sure the on close callback is called
// here, otherwise we might leak something from the
// on create callback.
// NOTE(review): on_close_callback is invoked unconditionally here but is
// NULL-checked in toku_lt_close — confirm every caller passes a non-NULL
// callback, and that the toku_lt_close just above cannot have already run
// it (double-invoke) when creation fails after the callback was installed.
on_close_callback(tree);
} }
ltm_mutex_unlock(mgr); ltm_mutex_unlock(mgr);
return r; return r;
...@@ -1424,6 +1434,10 @@ toku_lt_close(toku_lock_tree* tree) { ...@@ -1424,6 +1434,10 @@ toku_lt_close(toku_lock_tree* tree) {
if (!tree) { if (!tree) {
r = EINVAL; goto cleanup; r = EINVAL; goto cleanup;
} }
// call the on close callback if necessary
if (tree->on_close_callback) {
tree->on_close_callback(tree);
}
tree->mgr->STATUS_VALUE(LTM_LT_DESTROY)++; tree->mgr->STATUS_VALUE(LTM_LT_DESTROY)++;
tree->mgr->STATUS_VALUE(LTM_LT_NUM)--; tree->mgr->STATUS_VALUE(LTM_LT_NUM)--;
lock_request_tree_destroy(tree); lock_request_tree_destroy(tree);
...@@ -2183,6 +2197,16 @@ toku_lt_remove_db_ref(toku_lock_tree* tree) { ...@@ -2183,6 +2197,16 @@ toku_lt_remove_db_ref(toku_lock_tree* tree) {
assert_zero(r); assert_zero(r);
} }
// associate opaque user data with the lock tree. the lock tree never
// interprets or frees this pointer; ownership stays with the caller
// (here it is used to stash a cloned ft handle — see t:4838).
void
toku_lt_set_userdata(toku_lock_tree *tree, void *userdata) {
    tree->userdata = userdata;
}
// retrieve the opaque user data previously stored with
// toku_lt_set_userdata, or NULL if none was ever set
// (the tree is zero-initialized on creation).
void *
toku_lt_get_userdata(toku_lock_tree *tree) {
    return tree->userdata;
}
static void static void
lock_request_init_wait(toku_lock_request *lock_request) { lock_request_init_wait(toku_lock_request *lock_request) {
if (!lock_request->wait_initialized) { if (!lock_request->wait_initialized) {
......
...@@ -43,6 +43,11 @@ char* toku_lt_strerror(TOKU_LT_ERROR r /**< Error code */) ...@@ -43,6 +43,11 @@ char* toku_lt_strerror(TOKU_LT_ERROR r /**< Error code */)
typedef struct __toku_lock_tree toku_lock_tree; typedef struct __toku_lock_tree toku_lock_tree;
#endif #endif
// called by toku_ltm_get_lt if the lt needed to be created
typedef void (*toku_lt_on_create_cb)(toku_lock_tree *tree, void *extra);
// called by toku_lt_close when there are no more references to the lt
typedef void (*toku_lt_on_close_cb)(toku_lock_tree *tree);
#if !defined(TOKU_LTH_DEFINE) #if !defined(TOKU_LTH_DEFINE)
#define TOKU_LTH_DEFINE #define TOKU_LTH_DEFINE
typedef struct __toku_lth toku_lth; typedef struct __toku_lth toku_lth;
...@@ -114,9 +119,12 @@ void ...@@ -114,9 +119,12 @@ void
toku_lt_update_descriptor(toku_lock_tree* tree, DESCRIPTOR desc); toku_lt_update_descriptor(toku_lock_tree* tree, DESCRIPTOR desc);
/** /**
Gets a lock tree for a given DB with id dict_id Gets a lock tree for a given DB with id dict_id. If the locktree is created,
the on_create_callback will be called with a pointer to the new tree and extra.
*/ */
int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree, DICTIONARY_ID dict_id, DESCRIPTOR desc, toku_dbt_cmp compare_fun); int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree, DICTIONARY_ID dict_id, DESCRIPTOR desc,
toku_dbt_cmp compare_fun, toku_lt_on_create_cb on_create_callback, void *on_create_extra,
toku_lt_on_close_cb on_close_callback);
void toku_ltm_invalidate_lt(toku_ltm* mgr, DICTIONARY_ID dict_id); void toku_ltm_invalidate_lt(toku_ltm* mgr, DICTIONARY_ID dict_id);
...@@ -320,6 +328,10 @@ void toku_lt_remove_db_ref(toku_lock_tree* tree); ...@@ -320,6 +328,10 @@ void toku_lt_remove_db_ref(toku_lock_tree* tree);
void toku_lt_verify(toku_lock_tree *tree); void toku_lt_verify(toku_lock_tree *tree);
void toku_lt_set_userdata(toku_lock_tree *tree, void *userdata);
void *toku_lt_get_userdata(toku_lock_tree *tree);
typedef enum { typedef enum {
LOCK_REQUEST_INIT = 0, LOCK_REQUEST_INIT = 0,
LOCK_REQUEST_PENDING = 1, LOCK_REQUEST_PENDING = 1,
......
...@@ -44,7 +44,7 @@ int main(int argc, const char *argv[]) { ...@@ -44,7 +44,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
TXNID txn_a = 1; TXNID txn_a = 1;
......
...@@ -144,7 +144,7 @@ int main(int argc, const char *argv[]) { ...@@ -144,7 +144,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
toku_pthread_t tids[nthreads]; toku_pthread_t tids[nthreads];
......
...@@ -47,7 +47,7 @@ static void setup_tree(void) { ...@@ -47,7 +47,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -41,7 +41,7 @@ static void setup_tree(void) { ...@@ -41,7 +41,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -44,7 +44,7 @@ static void setup_tree(void) { ...@@ -44,7 +44,7 @@ static void setup_tree(void) {
assert(ltm); assert(ltm);
//ask ltm for lock tree //ask ltm for lock tree
DICTIONARY_ID dict_id = {0x1234}; DICTIONARY_ID dict_id = {0x1234};
r = toku_ltm_get_lt(ltm, &lt, dict_id, NULL, intcmp); r = toku_ltm_get_lt(ltm, &lt, dict_id, NULL, intcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
......
...@@ -26,7 +26,7 @@ static void setup_ltm(void) { ...@@ -26,7 +26,7 @@ static void setup_ltm(void) {
static void setup_tree(size_t index, DICTIONARY_ID dict_id) { static void setup_tree(size_t index, DICTIONARY_ID dict_id) {
assert(!lt[index] && ltm); assert(!lt[index] && ltm);
r = toku_ltm_get_lt(ltm, &lt[index], dict_id, NULL, intcmp); r = toku_ltm_get_lt(ltm, &lt[index], dict_id, NULL, intcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt[index]); assert(lt[index]);
} }
......
...@@ -32,7 +32,7 @@ static void db_open_tree(size_t index, size_t db_id_index) { ...@@ -32,7 +32,7 @@ static void db_open_tree(size_t index, size_t db_id_index) {
(lt_refs[index] > 0 && lts[index])); (lt_refs[index] > 0 && lts[index]));
assert(ltm); assert(ltm);
lt_refs[index]++; lt_refs[index]++;
r = toku_ltm_get_lt(ltm, &lts[index], dict_ids[db_id_index], NULL, intcmp); r = toku_ltm_get_lt(ltm, &lts[index], dict_ids[db_id_index], NULL, intcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lts[index]); assert(lts[index]);
} }
......
...@@ -41,7 +41,7 @@ static void setup_tree(void) { ...@@ -41,7 +41,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -51,7 +51,7 @@ int main(int argc, const char *argv[]) { ...@@ -51,7 +51,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -45,7 +45,7 @@ int main(int argc, const char *argv[]) { ...@@ -45,7 +45,7 @@ int main(int argc, const char *argv[]) {
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
...@@ -76,7 +76,7 @@ int main(int argc, const char *argv[]) { ...@@ -76,7 +76,7 @@ int main(int argc, const char *argv[]) {
toku_lt_remove_ref(lt); toku_lt_remove_ref(lt);
// reopen the lock tree // reopen the lock tree
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
// txn_b gets W(L) // txn_b gets W(L)
......
...@@ -44,7 +44,7 @@ int main(int argc, const char *argv[]) { ...@@ -44,7 +44,7 @@ int main(int argc, const char *argv[]) {
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
DBT key_l; dbt_init(&key_l, "L", 1); DBT key_l; dbt_init(&key_l, "L", 1);
......
...@@ -49,7 +49,7 @@ int main(int argc, const char *argv[]) { ...@@ -49,7 +49,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
DBT key_l; dbt_init(&key_l, "L", 1); DBT key_l; dbt_init(&key_l, "L", 1);
......
...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) { ...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) { ...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
DBT key_l; dbt_init(&key_l, "L", 1); DBT key_l; dbt_init(&key_l, "L", 1);
......
...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) { ...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
DBT key_l; dbt_init(&key_l, "L", 1); DBT key_l; dbt_init(&key_l, "L", 1);
......
...@@ -56,7 +56,7 @@ int main(int argc, const char *argv[]) { ...@@ -56,7 +56,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -124,7 +124,7 @@ int main(int argc, const char *argv[]) { ...@@ -124,7 +124,7 @@ int main(int argc, const char *argv[]) {
assert(s.curr_lock_memory == 0); assert(s.curr_lock_memory == 0);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
TXNID txn_a = 1; TXNID txn_a = 1;
......
...@@ -126,7 +126,7 @@ int main(int argc, const char *argv[]) { ...@@ -126,7 +126,7 @@ int main(int argc, const char *argv[]) {
assert(s.curr_lock_memory == 0); assert(s.curr_lock_memory == 0);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
TXNID txn_a = 1; TXNID txn_a = 1;
......
...@@ -41,7 +41,7 @@ static void setup_tree(void) { ...@@ -41,7 +41,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -56,7 +56,7 @@ int main(int argc, const char *argv[]) { ...@@ -56,7 +56,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -66,7 +66,7 @@ int main(int argc, const char *argv[]) ...@@ -66,7 +66,7 @@ int main(int argc, const char *argv[])
r = toku_ltm_create(&ltm, MAX_LOCKS, MAX_LOCK_MEMORY, dbpanic); r = toku_ltm_create(&ltm, MAX_LOCKS, MAX_LOCK_MEMORY, dbpanic);
CKERR(r); CKERR(r);
current_descriptor = NULL; current_descriptor = NULL;
toku_ltm_get_lt(ltm, &tree, (DICTIONARY_ID) {1}, current_descriptor, cmp_function); toku_ltm_get_lt(ltm, &tree, (DICTIONARY_ID) {1}, current_descriptor, cmp_function, NULL, NULL, NULL);
CKERR(r); CKERR(r);
for (int d = 0; d < num_descriptors; d++) { for (int d = 0; d < num_descriptors; d++) {
......
...@@ -55,7 +55,7 @@ int main(int argc, const char *argv[]) { ...@@ -55,7 +55,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -44,7 +44,7 @@ int main(int argc, const char *argv[]) { ...@@ -44,7 +44,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -43,7 +43,7 @@ int main(int argc, const char *argv[]) { ...@@ -43,7 +43,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -42,7 +42,7 @@ int main(int argc, const char *argv[]) { ...@@ -42,7 +42,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -40,7 +40,7 @@ int main(int argc, const char *argv[]) { ...@@ -40,7 +40,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
// add a lock for a transaction // add a lock for a transaction
......
...@@ -42,7 +42,7 @@ int main(int argc, const char *argv[]) { ...@@ -42,7 +42,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -42,7 +42,7 @@ int main(int argc, const char *argv[]) { ...@@ -42,7 +42,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -80,7 +80,7 @@ int main(int argc, const char *argv[]) { ...@@ -80,7 +80,7 @@ int main(int argc, const char *argv[]) {
toku_ltm_set_lock_wait_time(ltm, UINT64_MAX); toku_ltm_set_lock_wait_time(ltm, UINT64_MAX);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -46,7 +46,7 @@ int main(int argc, const char *argv[]) { ...@@ -46,7 +46,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -41,7 +41,7 @@ static void setup_tree(void) { ...@@ -41,7 +41,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -41,7 +41,7 @@ static void setup_tree(void) { ...@@ -41,7 +41,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -41,7 +41,7 @@ static void setup_tree(void) { ...@@ -41,7 +41,7 @@ static void setup_tree(void) {
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic); r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic);
CKERR(r); CKERR(r);
assert(ltm); assert(ltm);
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
CKERR(r); CKERR(r);
assert(lt); assert(lt);
init_query(); init_query();
......
...@@ -40,7 +40,7 @@ int main(int argc, const char *argv[]) { ...@@ -40,7 +40,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) { ...@@ -39,7 +39,7 @@ int main(int argc, const char *argv[]) {
assert(r == 0 && ltm); assert(r == 0 && ltm);
toku_lock_tree *lt = NULL; toku_lock_tree *lt = NULL;
r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp); r = toku_ltm_get_lt(ltm, &lt, (DICTIONARY_ID){1}, NULL, dbcmp, NULL, NULL, NULL);
assert(r == 0 && lt); assert(r == 0 && lt);
const TXNID txn_a = 1; const TXNID txn_a = 1;
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include "ydb_row_lock.h" #include "ydb_row_lock.h"
#include "ydb_db.h" #include "ydb_db.h"
#include "ydb_write.h" #include "ydb_write.h"
#include <lock_tree/locktree.h>
static YDB_DB_LAYER_STATUS_S ydb_db_layer_status; static YDB_DB_LAYER_STATUS_S ydb_db_layer_status;
#ifdef STATUS_VALUE #ifdef STATUS_VALUE
...@@ -325,11 +325,42 @@ db_set_descriptors(DB *db, FT_HANDLE ft_handle) { ...@@ -325,11 +325,42 @@ db_set_descriptors(DB *db, FT_HANDLE ft_handle) {
// the descriptor via db->descriptor, because // the descriptor via db->descriptor, because
// a redirect may be happening underneath the covers. // a redirect may be happening underneath the covers.
// Need to investigate further. // Need to investigate further.
static void db_on_redirect_callback(FT_HANDLE ft_handle, void* extra) { static void
db_on_redirect_callback(FT_HANDLE ft_handle, void* extra) {
DB *db = extra; DB *db = extra;
db_set_descriptors(db, ft_handle); db_set_descriptors(db, ft_handle);
} }
// parameters threaded through toku_ltm_get_lt to the locktree's
// on-create callback: the opening txn (may be NULL) and the ft
// handle that the callback will clone for the new locktree.
struct lt_on_create_callback_extra {
    DB_TXN *txn;
    FT_HANDLE ft_handle;
};
// on locktree creation, stash a cloned ft handle in the tree's userdata.
// the clone keeps the ft in memory for the locktree's lifetime and is
// closed later by the on-close callback.
static void
lt_on_create_callback(toku_lock_tree *lt, void *extra) {
    struct lt_on_create_callback_extra *info = extra;
    TOKUTXN ttxn = NULL;
    if (info->txn) {
        ttxn = db_txn_struct_i(info->txn)->tokutxn;
    }
    FT_HANDLE clone;
    int r = toku_ft_handle_clone(&clone, info->ft_handle, ttxn);
    invariant_zero(r);

    // a freshly created locktree must not already carry userdata
    assert(toku_lt_get_userdata(lt) == NULL);
    toku_lt_set_userdata(lt, clone);
}
// when the locktree goes away, fetch the cloned ft handle from its
// userdata and close it, releasing the pin on the underlying ft.
static void
lt_on_close_callback(toku_lock_tree *lt) {
    FT_HANDLE stored_handle = toku_lt_get_userdata(lt);
    assert(stored_handle);
    toku_ft_handle_close(stored_handle);
}
int int
db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, u_int32_t flags, int mode) { db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, u_int32_t flags, int mode) {
int r; int r;
...@@ -395,8 +426,15 @@ db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, u_int32_t flags, ...@@ -395,8 +426,15 @@ db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, u_int32_t flags,
if (need_locktree) { if (need_locktree) {
db->i->dict_id = toku_ft_get_dictionary_id(db->i->ft_handle); db->i->dict_id = toku_ft_get_dictionary_id(db->i->ft_handle);
r = toku_ltm_get_lt(db->dbenv->i->ltm, &db->i->lt, db->i->dict_id, db->cmp_descriptor, toku_ft_get_bt_compare(db->i->ft_handle)); struct lt_on_create_callback_extra on_create_extra = {
if (r!=0) { goto error_cleanup; } .txn = txn,
.ft_handle = db->i->ft_handle,
};
r = toku_ltm_get_lt(db->dbenv->i->ltm, &db->i->lt, db->i->dict_id, db->cmp_descriptor,
toku_ft_get_bt_compare(db->i->ft_handle), lt_on_create_callback, &on_create_extra, lt_on_close_callback);
if (r != 0) {
goto error_cleanup;
}
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment