Commit f2daafe7 authored by Barry Perlman, committed by Yoni Fogel

[t:3113] Merge from tokudb.3113, select latest header based on checkpoint_count, not checkpoint_lsn (file can be closed without intervening checkpoint).  Merge command in toku/tokudb was svn merge -r27478:HEAD ../tokudb.3113 .

git-svn-id: file:///svn/toku/tokudb@27500 c7de825b-a66e-492c-adef-691d508d4ae1
parent 523c5aae
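
For orientation, the header-selection rule introduced by this commit can be sketched in isolation. This is a minimal, hypothetical C sketch, not the TokuDB code: header_slot and pick_header are simplified stand-ins for struct rbuf, the on-disk header fields, and deserialize_brtheader_from_fd_into_rbuf; only the decision logic is meant to match the description above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the real TokuDB types. */
typedef struct { uint64_t lsn; } LSN;
typedef struct {
    int      read_ok;           /* 0 if this header slot deserialized cleanly */
    uint64_t checkpoint_count;  /* bumped each time this header is written */
    LSN      checkpoint_lsn;    /* LSN of the checkpoint that wrote it */
} header_slot;

/* Return 0 or 1 for the slot to use, or -1 if neither is usable.
 * A slot is acceptable if it read cleanly and its checkpoint_lsn is no
 * later than max_acceptable_lsn.  When both are acceptable, the larger
 * checkpoint_count wins: the two counts always differ, but the two lsns
 * can be equal when the file was closed with no intervening checkpoint,
 * which is why the count, not the lsn, is the deciding field. */
static int pick_header(const header_slot *h0, const header_slot *h1, LSN max_acceptable_lsn) {
    int ok0 = (h0->read_ok == 0) && (h0->checkpoint_lsn.lsn <= max_acceptable_lsn.lsn);
    int ok1 = (h1->read_ok == 0) && (h1->checkpoint_lsn.lsn <= max_acceptable_lsn.lsn);
    if (ok0 && ok1) return (h0->checkpoint_count > h1->checkpoint_count) ? 0 : 1;
    if (ok0) return 0;
    if (ok1) return 1;
    return -1;
}

int main(void) {
    /* Both slots carry the same checkpoint_lsn because the dictionary was
     * closed (rewriting a header) without a checkpoint in between; only
     * checkpoint_count identifies the newer one. */
    header_slot h0 = { 0, 41, { 1000 } };
    header_slot h1 = { 0, 42, { 1000 } };
    LSN max_acceptable = { UINT64_MAX };   /* analogous to MAX_LSN: accept the newest */
    printf("use header %d\n", pick_header(&h0, &h1, max_acceptable));   /* prints 1 */
    return 0;
}
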
@@ -1749,78 +1749,66 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset_of_header, str
// Read brtheader from file into struct. Read both headers and use one.
// If a required_lsn is specified, then use the header with that lsn,
// or if a header with that lsn is not available, then use the latest
// header with an earlier lsn than required_lsn. (There may not be a header
// with required_lsn if the file did not change since the last checkpoint.)
// If a required_lsn is not specified, then use the latest header.
// We want the latest acceptable header whose checkpoint_lsn is no later
// than max_acceptable_lsn.
int
toku_deserialize_brtheader_from (int fd, LSN required_lsn, struct brt_header **brth) {
toku_deserialize_brtheader_from (int fd, LSN max_acceptable_lsn, struct brt_header **brth) {
struct rbuf rb_0;
struct rbuf rb_1;
u_int64_t checkpoint_count_0;
u_int64_t checkpoint_count_1;
LSN checkpoint_lsn_0;
LSN checkpoint_lsn_1;
int r = 0;
int r0;
int r1;
u_int32_t version_0, version_1, version = 0;
BOOL h0_acceptable = FALSE;
BOOL h1_acceptable = FALSE;
struct rbuf *rb = NULL;
int r0, r1, r;
{
toku_off_t header_0_off = 0;
r0 = deserialize_brtheader_from_fd_into_rbuf(fd, header_0_off, &rb_0, &checkpoint_count_0, &checkpoint_lsn_0, &version_0);
if ( (r0==0) && (checkpoint_lsn_0.lsn <= max_acceptable_lsn.lsn) )
h0_acceptable = TRUE;
}
{
toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE;
r1 = deserialize_brtheader_from_fd_into_rbuf(fd, header_1_off, &rb_1, &checkpoint_count_1, &checkpoint_lsn_1, &version_1);
if ( (r1==0) && (checkpoint_lsn_1.lsn <= max_acceptable_lsn.lsn) )
h1_acceptable = TRUE;
}
struct rbuf *rb = NULL;
// if either header is too new, the dictionary is unreadable
if (r0!=TOKUDB_DICTIONARY_TOO_NEW && r1!=TOKUDB_DICTIONARY_TOO_NEW) {
if (r0==0) {
rb = &rb_0;
version = version_0;
}
if (r1==0 && (r0!=0 || checkpoint_count_1 > checkpoint_count_0)) {
rb = &rb_1;
version = version_1;
}
if (r0==0 && r1==0) {
lazy_assert(checkpoint_count_1 != checkpoint_count_0);
if (rb == &rb_0) lazy_assert(version_0 >= version_1);
else lazy_assert(version_0 <= version_1);
}
}
if (required_lsn.lsn != ZERO_LSN.lsn) { // an upper bound lsn was requested
if ((r0==0 && checkpoint_lsn_0.lsn <= required_lsn.lsn) &&
(r1 || checkpoint_lsn_1.lsn > required_lsn.lsn)) {
rb = &rb_0;
version = version_0;
}
else if ((r1==0 && checkpoint_lsn_1.lsn <= required_lsn.lsn) &&
(r0 || checkpoint_lsn_0.lsn > required_lsn.lsn)) {
rb = &rb_1;
version = version_1;
}
else if (r0==0 && r1==0) { // read two good headers and both have qualified lsn
if (checkpoint_lsn_0.lsn > checkpoint_lsn_1.lsn) {
if (h0_acceptable && h1_acceptable) {
if (checkpoint_count_0 > checkpoint_count_1) {
invariant(checkpoint_count_0 == checkpoint_count_1 + 1);
invariant(version_0 >= version_1);
rb = &rb_0;
version = version_0;
r = 0;
}
else {
invariant(checkpoint_count_1 == checkpoint_count_0 + 1);
invariant(version_1 >= version_0);
rb = &rb_1;
version = version_1;
r = 0;
}
}
else {
rb = NULL;
r = -1; // may need new error code for unable to get required version of file
else if (h0_acceptable) {
rb = &rb_0;
version = version_0;
r = 0;
}
else if (h1_acceptable) {
rb = &rb_1;
version = version_1;
r = 0;
}
}
if (rb==NULL && r==0) {
if (rb==NULL) {
// We were unable to read either header or at least one is too new.
// Certain errors are higher priority than others. Order of these if/else if is important.
if (r0==TOKUDB_DICTIONARY_TOO_NEW || r1==TOKUDB_DICTIONARY_TOO_NEW)
@@ -1831,8 +1819,12 @@ toku_deserialize_brtheader_from (int fd, LSN required_lsn, struct brt_header **b
else if (r0==TOKUDB_DICTIONARY_NO_HEADER || r1==TOKUDB_DICTIONARY_NO_HEADER) {
r = TOKUDB_DICTIONARY_NO_HEADER;
}
else r = r0; //Arbitrarily report the error from the first header.
lazy_assert(r!=0);
else r = r0 ? r0 : r1; //Arbitrarily report the error from the first header, unless it's readable
// it should not be possible for both headers to be later than the max_acceptable_lsn
invariant(!( (r0==0 && checkpoint_lsn_0.lsn > max_acceptable_lsn.lsn) &&
(r1==0 && checkpoint_lsn_1.lsn > max_acceptable_lsn.lsn) ));
invariant(r!=0);
}
if (r==0) r = deserialize_brtheader_versioned(fd, rb, brth, version);
......
@@ -8,7 +8,7 @@
BOOL ignore_if_was_already_open;
int toku_testsetup_leaf(BRT brt, BLOCKNUM *blocknum) {
BRTNODE node;
int r = toku_read_brt_header_and_store_in_cachefile(brt->cf, ZERO_LSN, &brt->h, &ignore_if_was_already_open);
int r = toku_read_brt_header_and_store_in_cachefile(brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open);
if (r!=0) return r;
toku_create_new_brtnode(brt, &node, 0, 0);
@@ -22,7 +22,7 @@ int toku_testsetup_leaf(BRT brt, BLOCKNUM *blocknum) {
int toku_testsetup_nonleaf (BRT brt, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, u_int32_t *subtree_fingerprints, char **keys, int *keylens) {
BRTNODE node;
assert(n_children<=BRT_FANOUT);
int r = toku_read_brt_header_and_store_in_cachefile(brt->cf, ZERO_LSN, &brt->h, &ignore_if_was_already_open);
int r = toku_read_brt_header_and_store_in_cachefile(brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open);
if (r!=0) return r;
toku_create_new_brtnode(brt, &node, height, 0);
node->u.n.n_children=n_children;
@@ -47,7 +47,7 @@ int toku_testsetup_nonleaf (BRT brt, int height, BLOCKNUM *blocknum, int n_child
}
int toku_testsetup_root(BRT brt, BLOCKNUM blocknum) {
int r = toku_read_brt_header_and_store_in_cachefile(brt->cf, ZERO_LSN, &brt->h, &ignore_if_was_already_open);
int r = toku_read_brt_header_and_store_in_cachefile(brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open);
if (r!=0) return r;
brt->h->root = blocknum;
brt->h->root_hash.valid = FALSE;
......
@@ -3156,10 +3156,10 @@ int toku_brt_alloc_init_header(BRT t, TOKUTXN txn) {
return r;
}
int toku_read_brt_header_and_store_in_cachefile (CACHEFILE cf, LSN required_lsn, struct brt_header **header, BOOL* was_open)
int toku_read_brt_header_and_store_in_cachefile (CACHEFILE cf, LSN max_acceptable_lsn, struct brt_header **header, BOOL* was_open)
// If the cachefile already has the header, then just get it.
// If the cachefile has not been initialized, then don't modify anything.
// required_lsn is used if a particular version of the file is required based on checkpoint_lsn
// max_acceptable_lsn is the latest acceptable checkpointed version of the file.
{
{
struct brt_header *h;
@@ -3174,7 +3174,7 @@ int toku_read_brt_header_and_store_in_cachefile (CACHEFILE cf, LSN required_lsn,
int r;
{
int fd = toku_cachefile_get_and_pin_fd (cf);
r = toku_deserialize_brtheader_from(fd, required_lsn, &h);
r = toku_deserialize_brtheader_from(fd, max_acceptable_lsn, &h);
toku_cachefile_unpin_fd(cf);
}
if (r!=0) return r;
@@ -3273,10 +3273,10 @@ cleanup:
}
// This is the actual open, used for various purposes, such as normal use, recovery, and redirect.
// fname_in_env is the iname, relative to the env_dir (data_dir is already in iname as prefix)
// If required_lsn is specified (not ZERO_LSN), then that specific checkpointed version is required.
// fname_in_env is the iname, relative to the env_dir (data_dir is already in iname as prefix).
// The checkpointed version (checkpoint_lsn) of the dictionary must be no later than max_acceptable_lsn .
static int
brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, DB *db, FILENUM use_filenum, DICTIONARY_ID use_dictionary_id, LSN required_lsn) {
brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, DB *db, FILENUM use_filenum, DICTIONARY_ID use_dictionary_id, LSN max_acceptable_lsn) {
int r;
BOOL txn_created = FALSE;
@@ -3342,7 +3342,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
}
BOOL was_already_open;
if (is_create) {
r = toku_read_brt_header_and_store_in_cachefile(t->cf, required_lsn, &t->h, &was_already_open);
r = toku_read_brt_header_and_store_in_cachefile(t->cf, max_acceptable_lsn, &t->h, &was_already_open);
if (r==TOKUDB_DICTIONARY_NO_HEADER) {
r = toku_brt_alloc_init_header(t, txn);
if (r != 0) goto died_after_read_and_pin;
@@ -3357,7 +3357,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
}
else goto found_it;
} else {
if ((r = toku_read_brt_header_and_store_in_cachefile(t->cf, required_lsn, &t->h, &was_already_open))!=0) goto died_after_open;
if ((r = toku_read_brt_header_and_store_in_cachefile(t->cf, max_acceptable_lsn, &t->h, &was_already_open))!=0) goto died_after_open;
found_it:
t->nodesize = t->h->nodesize; /* inherit the pagesize from the file */
if (!t->did_set_flags) {
@@ -3433,11 +3433,11 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
// (dict_id is assigned by the brt_open() function.)
int
toku_brt_open_recovery(BRT t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn,
DB *db, FILENUM use_filenum, LSN required_lsn) {
DB *db, FILENUM use_filenum, LSN max_acceptable_lsn) {
int r;
lazy_assert(use_filenum.fileid != FILENUM_NONE.fileid);
r = brt_open(t, fname_in_env, is_create, only_create, cachetable,
txn, db, use_filenum, DICTIONARY_ID_NONE, required_lsn);
txn, db, use_filenum, DICTIONARY_ID_NONE, max_acceptable_lsn);
return r;
}
@@ -3445,7 +3445,7 @@ toku_brt_open_recovery(BRT t, const char *fname_in_env, int is_create, int only_
int
toku_brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, DB *db) {
int r;
r = brt_open(t, fname_in_env, is_create, only_create, cachetable, txn, db, FILENUM_NONE, DICTIONARY_ID_NONE, ZERO_LSN);
r = brt_open(t, fname_in_env, is_create, only_create, cachetable, txn, db, FILENUM_NONE, DICTIONARY_ID_NONE, MAX_LSN);
return r;
}
@@ -3467,7 +3467,7 @@ brt_open_for_redirect(BRT *new_brtp, const char *fname_in_env, TOKUTXN txn, BRT
lazy_assert_zero(r);
}
CACHETABLE ct = toku_cachefile_get_cachetable(old_brt->cf);
r = brt_open(t, fname_in_env, 0, 0, ct, txn, old_brt->db, FILENUM_NONE, old_h->dict_id, ZERO_LSN);
r = brt_open(t, fname_in_env, 0, 0, ct, txn, old_brt->db, FILENUM_NONE, old_h->dict_id, MAX_LSN);
lazy_assert_zero(r);
if (old_h->descriptor.version==0) {
lazy_assert(t->h->descriptor.version == 0);
......
@@ -76,7 +76,7 @@
dump_header (int f, struct brt_header **header, CACHEFILE cf) {
struct brt_header *h;
int r;
r = toku_deserialize_brtheader_from (f, ZERO_LSN, &h);
r = toku_deserialize_brtheader_from (f, MAX_LSN, &h);
assert(r==0);
h->cf = cf;
printf("brtheader:\n");
......
@@ -238,7 +238,7 @@ static void recover_yield(voidfp f, void *fpthunk, void *UU(yieldthunk)) {
// Open the file if it is not already open. If it is already open, then do nothing.
static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, BOOL must_create, int mode, BYTESTRING *bs_iname, FILENUM filenum, u_int32_t treeflags,
u_int32_t descriptor_version, BYTESTRING* descriptor, TOKUTXN txn, uint32_t nodesize, LSN required_lsn) {
u_int32_t descriptor_version, BYTESTRING* descriptor, TOKUTXN txn, uint32_t nodesize, LSN max_acceptable_lsn) {
int r;
char *iname = fixup_fname(bs_iname);
@@ -271,7 +271,7 @@ static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, BOOL must_create
r = toku_brt_set_descriptor(brt, descriptor_version, &descriptor_dbt);
if (r != 0) goto close_brt;
}
r = toku_brt_open_recovery(brt, iname, must_create, must_create, renv->ct, txn, fake_db, filenum, required_lsn);
r = toku_brt_open_recovery(brt, iname, must_create, must_create, renv->ct, txn, fake_db, filenum, max_acceptable_lsn);
if (r != 0) {
close_brt:
;
@@ -409,14 +409,15 @@ static int toku_recover_fassociate (struct logtype_fassociate *l, RECOVER_ENV re
case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END:
renv->ss.checkpoint_num_fassociate++;
assert(r==DB_NOTFOUND); //Not open
// open it if it exists
// if rollback file, specify which checkpointed version of file we need (not just the latest)
// Open it if it exists.
// If rollback file, specify which checkpointed version of file we need (not just the latest)
// because we cannot use a rollback log that is later than the last complete checkpoint. See #3113.
{
BOOL rollback_file = !strcmp(fname, ROLLBACK_CACHEFILE_NAME);
LSN required_lsn = ZERO_LSN;
LSN max_acceptable_lsn = MAX_LSN;
if (rollback_file)
required_lsn = renv->ss.checkpoint_begin_lsn;
r = internal_recover_fopen_or_fcreate(renv, FALSE, 0, &l->iname, l->filenum, l->treeflags, 0, NULL, NULL, 0, required_lsn);
max_acceptable_lsn = renv->ss.checkpoint_begin_lsn;
r = internal_recover_fopen_or_fcreate(renv, FALSE, 0, &l->iname, l->filenum, l->treeflags, 0, NULL, NULL, 0, max_acceptable_lsn);
if (r==0 && rollback_file) {
//Load rollback cachefile
r = file_map_find(&renv->fmap, l->filenum, &tuple);
@@ -645,7 +646,7 @@ static int toku_recover_fcreate (struct logtype_fcreate *l, RECOVER_ENV renv) {
toku_free(iname);
BOOL must_create = TRUE;
r = internal_recover_fopen_or_fcreate(renv, must_create, l->mode, &l->iname, l->filenum, l->treeflags, l->descriptor_version, &l->descriptor, txn, l->nodesize, ZERO_LSN);
r = internal_recover_fopen_or_fcreate(renv, must_create, l->mode, &l->iname, l->filenum, l->treeflags, l->descriptor_version, &l->descriptor, txn, l->nodesize, MAX_LSN);
return r;
}
@@ -672,7 +673,7 @@ static int toku_recover_fopen (struct logtype_fopen *l, RECOVER_ENV renv) {
if (strcmp(fname, ROLLBACK_CACHEFILE_NAME)) {
//Rollback cachefile can only be opened via fassociate.
r = internal_recover_fopen_or_fcreate(renv, must_create, 0, &l->iname, l->filenum, l->treeflags, descriptor_version, descriptor, txn, 0, ZERO_LSN);
r = internal_recover_fopen_or_fcreate(renv, must_create, 0, &l->iname, l->filenum, l->treeflags, descriptor_version, descriptor, txn, 0, MAX_LSN);
}
toku_free(fname);
return r;
......
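
The fassociate hunk above reduces to a per-file policy for the lsn bound: ordinary dictionaries may open at their newest header, while the rollback cachefile must not be newer than the last complete checkpoint (see #3113). A minimal sketch of just that decision follows; the helper name choose_max_acceptable_lsn is hypothetical, and the values shown for MAX_LSN and ROLLBACK_CACHEFILE_NAME are illustrative assumptions rather than quotes from the codebase.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint64_t lsn; } LSN;

static const LSN MAX_LSN = { UINT64_MAX };          /* assumed value, for illustration */
#define ROLLBACK_CACHEFILE_NAME "tokudb.rollback"   /* assumed name, for illustration */

/* During recovery's fassociate, a plain dictionary may be opened at its
 * newest header (MAX_LSN), but a rollback log later than the last complete
 * checkpoint cannot be used, so the rollback file is bounded by
 * checkpoint_begin_lsn instead. */
static LSN choose_max_acceptable_lsn(const char *fname, LSN checkpoint_begin_lsn) {
    if (strcmp(fname, ROLLBACK_CACHEFILE_NAME) == 0)
        return checkpoint_begin_lsn;
    return MAX_LSN;
}

int main(void) {
    LSN checkpoint_begin = { 5000 };
    printf("rollback bound:   %llu\n",
           (unsigned long long) choose_max_acceptable_lsn(ROLLBACK_CACHEFILE_NAME, checkpoint_begin).lsn);
    printf("dictionary bound: %llu\n",
           (unsigned long long) choose_max_acceptable_lsn("foo.tokudb", checkpoint_begin).lsn);
    return 0;
}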