Commit 0c7837dd authored by Yoni Fogel's avatar Yoni Fogel

closes[t:2440] Merge #2440 to main.

closes[t:2512] Merge #2512 to main.
Delete dev branch

git-svn-id: file:///svn/toku/tokudb@19439 c7de825b-a66e-492c-adef-691d508d4ae1
parent d7582480
......@@ -435,6 +435,7 @@ deserialize_brtheader_10 (int fd, struct rbuf *rb, struct brt_header **brth) {
h->panic_string = 0;
toku_list_init(&h->live_brts);
toku_list_init(&h->zombie_brts);
toku_list_init(&h->checkpoint_before_commit_link);
//version MUST be in network order on disk regardless of disk order
h->layout_version = rbuf_network_int(&rc);
assert(h->layout_version==BRT_LAYOUT_VERSION_10);
......
......@@ -180,8 +180,10 @@ struct brt_header {
// If a transaction locked the BRT when it was empty, which transaction? (Only the latest one matters)
// 0 if no such transaction
TXNID txnid_that_created_or_locked_when_empty;
TXNID txnid_that_suppressed_recovery_logs;
struct toku_list live_brts;
struct toku_list zombie_brts;
struct toku_list checkpoint_before_commit_link;
};
struct brt {
......
......@@ -1372,6 +1372,7 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) {
h->panic_string = 0;
toku_list_init(&h->live_brts);
toku_list_init(&h->zombie_brts);
toku_list_init(&h->checkpoint_before_commit_link);
//version MUST be in network order on disk regardless of disk order
h->layout_version = rbuf_network_int(&rc);
//TODO: #1924
......
......@@ -2686,17 +2686,23 @@ toku_brt_log_put_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, co
TOKULOGGER logger = toku_txn_logger(txn);
if (logger) {
FILENUM fnums[num_brts];
FILENUMS filenums = {.num = num_brts, .filenums = fnums};
int i;
int num_unsuppressed_brts = 0;
for (i = 0; i < num_brts; i++) {
fnums[i] = toku_cachefile_filenum(brts[i]->cf);
if (brts[i]->h->txnid_that_suppressed_recovery_logs == TXNID_NONE) {
//Logging not suppressed for this brt.
fnums[num_unsuppressed_brts++] = toku_cachefile_filenum(brts[i]->cf);
}
}
if (num_unsuppressed_brts) {
FILENUMS filenums = {.num = num_unsuppressed_brts, .filenums = fnums};
BYTESTRING keybs = {.len=key->size, .data=key->data};
BYTESTRING valbs = {.len=val->size, .data=val->data};
TXNID xid = toku_txn_get_txnid(txn);
FILENUM src_filenum = src_brt ? toku_cachefile_filenum(src_brt->cf) : FILENUM_NONE;
r = toku_log_enq_insert_multiple(logger, (LSN*)0, 0, src_filenum, filenums, xid, keybs, valbs);
}
}
return r;
}
......@@ -2725,7 +2731,8 @@ int toku_brt_maybe_insert (BRT brt, DBT *key, DBT *val, TOKUTXN txn, BOOL oplsn_
message_xids = xids_get_root_xids();
}
TOKULOGGER logger = toku_txn_logger(txn);
if (do_logging && logger) {
if (do_logging && logger &&
brt->h->txnid_that_suppressed_recovery_logs == TXNID_NONE) {
BYTESTRING keybs = {.len=key->size, .data=key->data};
BYTESTRING valbs = {.len=val->size, .data=val->data};
if (type == BRT_INSERT) {
......@@ -2759,17 +2766,23 @@ toku_brt_log_del_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, co
TOKULOGGER logger = toku_txn_logger(txn);
if (logger) {
FILENUM fnums[num_brts];
FILENUMS filenums = {.num = num_brts, .filenums = fnums};
int i;
int num_unsuppressed_brts = 0;
for (i = 0; i < num_brts; i++) {
fnums[i] = toku_cachefile_filenum(brts[i]->cf);
if (brts[i]->h->txnid_that_suppressed_recovery_logs == TXNID_NONE) {
//Logging not suppressed for this brt.
fnums[num_unsuppressed_brts++] = toku_cachefile_filenum(brts[i]->cf);
}
}
if (num_unsuppressed_brts) {
FILENUMS filenums = {.num = num_unsuppressed_brts, .filenums = fnums};
BYTESTRING keybs = {.len=key->size, .data=key->data};
BYTESTRING valbs = {.len=val->size, .data=val->data};
TXNID xid = toku_txn_get_txnid(txn);
FILENUM src_filenum = src_brt ? toku_cachefile_filenum(src_brt->cf) : FILENUM_NONE;
r = toku_log_enq_delete_multiple(logger, (LSN*)0, 0, src_filenum, filenums, xid, keybs, valbs);
}
}
return r;
}
......@@ -2791,7 +2804,8 @@ int toku_brt_maybe_delete(BRT brt, DBT *key, TOKUTXN txn, BOOL oplsn_valid, LSN
message_xids = xids_get_root_xids();
}
TOKULOGGER logger = toku_txn_logger(txn);
if (do_logging && logger) {
if (do_logging && logger &&
brt->h->txnid_that_suppressed_recovery_logs == TXNID_NONE) {
BYTESTRING keybs = {.len=key->size, .data=key->data};
r = toku_log_enq_delete_any(logger, (LSN*)0, 0, toku_cachefile_filenum(brt->cf), xid, keybs);
if (r!=0) return r;
......@@ -3022,6 +3036,7 @@ brt_init_header (BRT t) {
toku_list_init(&t->h->live_brts);
toku_list_init(&t->h->zombie_brts);
toku_list_init(&t->h->checkpoint_before_commit_link);
int r = brt_init_header_partial(t);
if (r==0) toku_block_verify_no_free_blocknums(t->h->blocktable);
return r;
......@@ -3782,7 +3797,7 @@ brtheader_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v)
// Write checkpoint-in-progress versions of header and translation to disk (really to OS internal buffer).
// Must have access to fd (protected)
int
toku_brtheader_checkpoint (CACHEFILE UU(cachefile), int fd, void *header_v)
toku_brtheader_checkpoint (CACHEFILE cf, int fd, void *header_v)
{
struct brt_header *h = header_v;
struct brt_header *ch = h->checkpoint_header;
......@@ -3794,6 +3809,11 @@ toku_brtheader_checkpoint (CACHEFILE UU(cachefile), int fd, void *header_v)
if (ch->panic!=0) goto handle_error;
assert(ch->type == BRTHEADER_CHECKPOINT_INPROGRESS);
if (ch->dirty) { // this is the only place this bit is tested (in checkpoint_header)
TOKULOGGER logger = toku_cachefile_logger(cf);
if (logger) {
r = toku_logger_fsync_if_lsn_not_fsynced(logger, ch->checkpoint_lsn);
if (r!=0) goto handle_error;
}
{
ch->checkpoint_count++;
// write translation and header to disk (or at least to OS internal buffer)
......@@ -5338,7 +5358,7 @@ int toku_brt_maybe_delete_both(BRT brt, DBT *key, DBT *val, TOKUTXN txn, BOOL op
message_xids = xids_get_root_xids();
}
TOKULOGGER logger = toku_txn_logger(txn);
if (logger) {
if (logger && brt->h->txnid_that_suppressed_recovery_logs == TXNID_NONE) {
BYTESTRING keybs = {.len=key->size, .data=key->data};
BYTESTRING valbs = {.len=val->size, .data=val->data};
r = toku_log_enq_delete_both(logger, (LSN*)0, 0, toku_cachefile_filenum(brt->cf), xid, keybs, valbs);
......@@ -5693,25 +5713,18 @@ toku_brt_is_empty (BRT brt) {
return is_empty;
}
int
toku_brt_note_table_lock (BRT brt, TOKUTXN txn, BOOL ignore_not_empty) {
int r = 0;
if (brt->h->txnid_that_created_or_locked_when_empty != toku_txn_get_txnid(txn) &&
(ignore_not_empty || toku_brt_is_empty(brt)) &&
brt->h->txnid_that_created_or_locked_when_empty == TXNID_NONE)
{
brt->h->txnid_that_created_or_locked_when_empty = toku_txn_get_txnid(txn);
r = toku_txn_note_brt(txn, brt);
assert(r==0);
r = toku_logger_save_rollback_tablelock_on_empty_table(txn, toku_cachefile_filenum(brt->cf));
if (r==0) {
TOKULOGGER logger = toku_txn_logger(txn);
TXNID xid = toku_txn_get_txnid(txn);
r = toku_log_tablelock_on_empty_table(logger, (LSN*)NULL,
0, toku_cachefile_filenum(brt->cf), xid);
}
}
return r;
//Suppress both rollback and recovery logs.
void
toku_brt_suppress_recovery_logs (BRT brt, TOKUTXN txn) {
assert(brt->h->txnid_that_created_or_locked_when_empty == toku_txn_get_txnid(txn));
assert(brt->h->txnid_that_suppressed_recovery_logs == TXNID_NONE);
brt->h->txnid_that_suppressed_recovery_logs = toku_txn_get_txnid(txn);
toku_list_push(&txn->checkpoint_before_commit, &brt->h->checkpoint_before_commit_link);
}
BOOL
toku_brt_is_recovery_logging_suppressed (BRT brt) {
return brt->h->txnid_that_suppressed_recovery_logs != TXNID_NONE;
}
LSN toku_brt_checkpoint_lsn(BRT brt) {
......
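The pair of functions above is the heart of this change: only the transaction that locked the empty dictionary may suppress its recovery logging, and doing so also queues the header on the transaction's checkpoint-before-commit list. A standalone sketch of the gating invariant follows; the types and names are simplified stand-ins, not the real toku_brt API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t TXNID;
#define TXNID_NONE ((TXNID)0)

/* Hypothetical stand-in for brt_header. */
struct header {
    TXNID txnid_that_created_or_locked_when_empty;
    TXNID txnid_that_suppressed_recovery_logs;
};

/* Only the txn that locked the (empty) table may suppress its recovery logs. */
static void suppress_recovery_logs(struct header *h, TXNID txn) {
    assert(h->txnid_that_created_or_locked_when_empty == txn);
    assert(h->txnid_that_suppressed_recovery_logs == TXNID_NONE);
    h->txnid_that_suppressed_recovery_logs = txn;
}

static int recovery_logging_suppressed(const struct header *h) {
    return h->txnid_that_suppressed_recovery_logs != TXNID_NONE;
}

/* Mirrors the pattern in toku_brt_maybe_insert/delete: log only when not suppressed. */
static void maybe_log_insert(struct header *h, const char *key) {
    if (!recovery_logging_suppressed(h))
        printf("enq_insert %s\n", key);   /* would be toku_log_enq_insert(...) */
    else
        printf("insert %s (recovery log suppressed)\n", key);
}

int main(void) {
    struct header h = { .txnid_that_created_or_locked_when_empty = 7,
                        .txnid_that_suppressed_recovery_logs = TXNID_NONE };
    maybe_log_insert(&h, "a");    /* logged */
    suppress_recovery_logs(&h, 7);
    maybe_log_insert(&h, "b");    /* not logged */
    return 0;
}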
......@@ -206,8 +206,11 @@ void toku_maybe_truncate_cachefile (CACHEFILE cf, int fd, u_int64_t size_used);
int maybe_preallocate_in_file (int fd, u_int64_t size);
// Effect: If file size is less than SIZE, make it bigger by either doubling it or growing by 16MB whichever is less.
int toku_brt_note_table_lock (BRT brt, TOKUTXN txn, BOOL ignore_not_empty);
// Effect: Record the fact that the BRT has a table lock (and thus no other txn will modify it until this txn completes). As a result, we can limit the amount of information in the rollback data structure.
void toku_brt_suppress_recovery_logs (BRT brt, TOKUTXN txn);
// Effect: suppresses recovery logs
// Requires: this is a (target) redirected brt
// implies: txnid_that_created_or_locked_when_empty matches txn
// implies: toku_txn_note_brt(brt, txn) has been called
int toku_brt_zombie_needed (BRT brt);
......@@ -217,6 +220,7 @@ BOOL toku_brt_is_empty (BRT brt);
double get_tdiff(void) __attribute__((__visibility__("default")));
BOOL toku_brt_is_recovery_logging_suppressed (BRT);
//TODO: #1485 once we have multiple main threads, restore this code, analyze performance.
#ifndef TOKU_MULTIPLE_MAIN_THREADS
#define TOKU_MULTIPLE_MAIN_THREADS 0
......
......@@ -322,4 +322,5 @@ void toku_cachetable_set_env_dir(CACHETABLE ct, char *env_dir);
char * toku_construct_full_name(int count, ...);
char * toku_cachetable_get_fname_in_cwd(CACHETABLE ct, const char * fname_in_env);
int toku_cachetable_local_checkpoint_for_commit(CACHETABLE ct, TOKUTXN txn, uint32_t n, CACHEFILE cachefiles[n]);
#endif
......@@ -135,6 +135,7 @@ struct tokutxn {
uint32_t current_rollback_hash;
BOOL recovered_from_checkpoint;
ROLLBACK_LOG_NODE pinned_inprogress_rollback_log;
struct toku_list checkpoint_before_commit;
};
struct txninfo {
......
......@@ -13,8 +13,8 @@
#include "memory.h"
#include "x1764.h"
typedef void(*voidfp)(void);
typedef void(*YIELDF)(voidfp, void*);
typedef void(*voidfp)(void *thunk);
typedef void(*YIELDF)(voidfp, void *fpthunk, void *yieldthunk);
struct roll_entry;
#include "logger.h"
......
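The yield callback now receives a thunk for the function being yielded to, plus a separate context pointer for the yield mechanism itself; this is what lets commit run local_checkpoints_and_log_xcommit while the ydb lock is dropped (see ydb_yield and recover_yield later in this diff). A minimal standalone sketch of the new calling convention, using a mock lock and hypothetical names:

#include <stdio.h>

typedef void (*voidfp)(void *thunk);
typedef void (*YIELDF)(voidfp f, void *fpthunk, void *yieldthunk);

/* Hypothetical lock, for illustration only. */
static void lock_acquire(void *lock)  { printf("lock   %s\n", (char *)lock); }
static void lock_release(void *lock)  { printf("unlock %s\n", (char *)lock); }

/* Shaped like ydb_yield: drop the lock, run f(fpthunk), retake the lock. */
static void example_yield(voidfp f, void *fpthunk, void *yieldthunk) {
    lock_release(yieldthunk);
    if (f) f(fpthunk);
    lock_acquire(yieldthunk);
}

/* Shaped like local_checkpoints_and_log_xcommit: runs without the big lock held. */
static void do_commit_work(void *thunk) {
    int *counter = thunk;
    (*counter)++;
    printf("commit work ran (counter=%d)\n", *counter);
}

int main(void) {
    int counter = 0;
    YIELDF yield = example_yield;
    yield(do_commit_work, &counter, "ydb-lock");
    yield(NULL, NULL, "ydb-lock");   /* plain yield with nothing to run */
    return 0;
}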
......@@ -77,8 +77,6 @@ const struct logtype rollbacks[] = {
{"BLOCKNUM", "spilled_tail", 0},
{"u_int32_t", "spilled_tail_hash", 0},
NULLFIELD}},
{"tablelock_on_empty_table", 'L', FA{{"FILENUM", "filenum", 0},
NULLFIELD}},
{"load", 'l', FA{{"BYTESTRING", "old_iname", 0},
{"BYTESTRING", "new_iname", 0},
NULLFIELD}},
......@@ -90,6 +88,7 @@ const struct logtype rollbacks[] = {
const struct logtype logtypes[] = {
// Records produced by checkpoints
{"local_txn_checkpoint", 'c', FA{{"TXNID", "xid", 0}, NULLFIELD}},
{"begin_checkpoint", 'x', FA{{"u_int64_t", "timestamp", 0}, NULLFIELD}},
{"end_checkpoint", 'X', FA{{"TXNID", "xid", 0}, // xid is LSN of begin_checkpoint
{"u_int64_t", "timestamp", 0},
......@@ -143,9 +142,6 @@ const struct logtype logtypes[] = {
{"fdelete", 'U', FA{{"TXNID", "xid", 0},
{"BYTESTRING", "iname", 0},
NULLFIELD}},
{"tablelock_on_empty_table", 'L', FA{{"FILENUM", "filenum", 0},
{"TXNID", "xid", 0},
NULLFIELD}},
{"enq_insert", 'I', FA{{"FILENUM", "filenum", 0},
{"TXNID", "xid", 0},
{"BYTESTRING", "key", 0},
......@@ -432,6 +428,7 @@ generate_log_reader (void) {
fprintf2(cf, hf, "int toku_log_fread_backward (FILE *infile, struct log_entry *le)");
fprintf(hf, ";\n");
fprintf(cf, "{\n");
fprintf(cf, " memset(le, 0, sizeof(*le));\n");
fprintf(cf, " {\n long pos = ftell(infile);\n if (pos<=12) return -1;\n }\n");
fprintf(cf, " int r = fseek(infile, -4, SEEK_CUR); \n");// assert(r==0);\n");
fprintf(cf, " if (r!=0) return errno;\n");
......
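Clearing the log entry up front means a failed or partial backward read hands the caller a zeroed struct rather than stack garbage. A standalone sketch of that defensive pattern with a hypothetical record type (not the generated log-entry code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct record {
    char *key;   /* heap-allocated by a successful parse */
    char *val;
};

/* Zero the out-parameter first; on any failure the caller can still
   safely free/inspect the fields because they are NULL, not garbage. */
static int read_record(FILE *f, struct record *r) {
    memset(r, 0, sizeof(*r));
    char buf[64];
    if (!fgets(buf, sizeof(buf), f)) return -1;   /* r stays zeroed */
    buf[strcspn(buf, "\n")] = '\0';
    r->key = strdup(buf);
    return r->key ? 0 : -1;
}

int main(void) {
    struct record rec;
    if (read_record(stdin, &rec) != 0) {
        free(rec.key);   /* safe: NULL on failure */
        free(rec.val);
        return 1;
    }
    printf("read key: %s\n", rec.key);
    free(rec.key);
    free(rec.val);
    return 0;
}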
......@@ -458,6 +458,24 @@ int toku_logger_fsync (TOKULOGGER logger)
return r;
}
int
toku_logger_fsync_if_lsn_not_fsynced (TOKULOGGER logger, LSN lsn) {
int r = 0;
if (logger->is_panicked) r = EINVAL;
else if (logger->write_log_files && logger->fsynced_lsn.lsn < lsn.lsn) {
r = ml_lock(&logger->input_lock); assert(r==0);
logger->input_lock_ctr++;
r = toku_logger_maybe_fsync(logger, lsn, TRUE);
if (r!=0) {
toku_logger_panic(logger, r);
}
else {
assert(logger->fsynced_lsn.lsn >= lsn.lsn);
}
}
return r;
}
void toku_logger_panic (TOKULOGGER logger, int err) {
logger->panic_errno=err;
logger->is_panicked=TRUE;
......
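toku_logger_fsync_if_lsn_not_fsynced lets the checkpoint path (toku_brtheader_checkpoint above) enforce write-ahead logging cheaply: the log is fsynced only when the requested LSN is not yet durable. A standalone model of that check with a simplified logger struct and hypothetical names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t lsn; } LSN;

struct logger {
    int write_log_files;   /* is logging enabled at all? */
    LSN fsynced_lsn;       /* everything up to here is durable */
    LSN written_lsn;       /* everything up to here is in the OS buffer */
};

/* Pretend fsync: the real logger forces the log file to disk here. */
static void log_fsync(struct logger *lg) {
    lg->fsynced_lsn = lg->written_lsn;
    printf("fsync log up to %llu\n", (unsigned long long)lg->fsynced_lsn.lsn);
}

/* Mirrors toku_logger_fsync_if_lsn_not_fsynced: skip the fsync when the
   requested LSN is already durable. */
static void fsync_if_lsn_not_fsynced(struct logger *lg, LSN lsn) {
    if (lg->write_log_files && lg->fsynced_lsn.lsn < lsn.lsn) {
        log_fsync(lg);
        assert(lg->fsynced_lsn.lsn >= lsn.lsn);
    }
}

int main(void) {
    struct logger lg = { .write_log_files = 1,
                         .fsynced_lsn = { 10 }, .written_lsn = { 25 } };
    LSN checkpoint_lsn = { 20 };
    fsync_if_lsn_not_fsynced(&lg, checkpoint_lsn);   /* forces an fsync */
    fsync_if_lsn_not_fsynced(&lg, checkpoint_lsn);   /* already durable: no-op */
    return 0;
}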
......@@ -21,6 +21,7 @@ int toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, BOOL cre
int toku_logger_close_rollback(TOKULOGGER logger, BOOL recovery_failed);
int toku_logger_fsync (TOKULOGGER logger);
int toku_logger_fsync_if_lsn_not_fsynced(TOKULOGGER logger, LSN lsn);
void toku_logger_panic (TOKULOGGER logger, int err);
int toku_logger_panicked(TOKULOGGER logger);
int toku_logger_is_open(TOKULOGGER logger);
......
......@@ -231,8 +231,8 @@ static const char *recover_state(RECOVER_ENV renv) {
// function supplied to transaction commit and abort
// No yielding is necessary, but it must call the f function if provided.
static void recover_yield(voidfp f, void *UU(extra)) {
if (f) f();
static void recover_yield(voidfp f, void *fpthunk, void *UU(yieldthunk)) {
if (f) f(fpthunk);
}
static int
......@@ -290,6 +290,30 @@ static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, BOOL must_create
return 0;
}
static int toku_recover_local_txn_checkpoint (struct logtype_local_txn_checkpoint *l, RECOVER_ENV UU(renv)) {
int r;
switch (renv->ss.ss) {
case FORWARD_BETWEEN_CHECKPOINT_BEGIN_END:
case FORWARD_NEWER_CHECKPOINT_END: {
// assert that the transaction exists
TOKUTXN txn = NULL;
r = toku_txnid2txn(renv->logger, l->xid, &txn);
assert(r == 0 && txn != NULL);
r = 0;
break;
}
default:
assert(0);
return 0;
}
return r;
}
static int toku_recover_backward_local_txn_checkpoint (struct logtype_local_txn_checkpoint *UU(l), RECOVER_ENV UU(renv)) {
// nothing
return 0;
}
static int toku_recover_begin_checkpoint (struct logtype_begin_checkpoint *l, RECOVER_ENV renv) {
int r;
switch (renv->ss.ss) {
......@@ -549,9 +573,7 @@ static int toku_recover_xcommit (struct logtype_xcommit *l, RECOVER_ENV renv) {
// commit the transaction
r = toku_txn_commit_with_lsn(txn, TRUE, recover_yield, NULL, l->lsn,
NULL, NULL,
// No need to release locks during recovery.
NULL, NULL, NULL);
NULL, NULL);
assert(r == 0);
// close the transaction
......@@ -709,26 +731,6 @@ static int toku_recover_backward_fdelete (struct logtype_fdelete *UU(l), RECOVER
return 0;
}
static int toku_recover_tablelock_on_empty_table(struct logtype_tablelock_on_empty_table *l, RECOVER_ENV renv) {
struct file_map_tuple *tuple = NULL;
int r = file_map_find(&renv->fmap, l->filenum, &tuple);
if (r==0) {
//We only have work to do if the file is open
TOKUTXN txn = NULL;
r = toku_txnid2txn(renv->logger, l->xid, &txn);
assert(r == 0);
assert(txn != NULL);
r = toku_brt_note_table_lock(tuple->brt, txn, TRUE);
assert(r == 0);
}
return 0;
}
static int toku_recover_backward_tablelock_on_empty_table(struct logtype_tablelock_on_empty_table *UU(l), RECOVER_ENV UU(renv)) {
// nothing
return 0;
}
static int toku_recover_enq_insert (struct logtype_enq_insert *l, RECOVER_ENV renv) {
int r;
TOKUTXN txn = NULL;
......
......@@ -315,7 +315,7 @@ toku_apply_rollinclude (TXNID xid,
r = func(txn, item, yield, yieldv, oplsn);
if (r!=0) return r;
count++;
if (count%2 == 0) yield(NULL, yieldv);
if (count%2 == 0) yield(NULL, NULL, yieldv);
}
if (next_log.b == spilled_head.b) {
assert(!found_head);
......@@ -382,55 +382,6 @@ toku_rollback_rollinclude (TXNID xid,
return r;
}
int
toku_rollback_tablelock_on_empty_table (FILENUM filenum,
TOKUTXN txn,
YIELDF yield,
void* yield_v,
LSN oplsn)
{
//TODO: Replace truncate function with something that doesn't need to mess with checkpoints.
// on rollback we have to make the file empty, since we locked an empty table and then may have done things to it.
CACHEFILE cf;
//printf("%s:%d committing insert %s %s\n", __FILE__, __LINE__, key.data, data.data);
int r = toku_cachefile_of_filenum(txn->logger->ct, filenum, &cf);
if (r==ENOENT) { //Missing file on recovered transaction is not an error
assert(txn->recovered_from_checkpoint);
r = 0;
goto done;
}
assert(r==0);
OMTVALUE brtv=NULL;
r = toku_omt_find_zero(txn->open_brts, find_brt_from_filenum, &filenum, &brtv, NULL, NULL);
assert(r==0);
BRT brt = brtv;
{ //Do NOT truncate the file if
//the file already survived the truncate and was checkpointed.
LSN treelsn = toku_brt_checkpoint_lsn(brt);
if (oplsn.lsn != 0 && oplsn.lsn <= treelsn.lsn) {
r = 0;
goto done;
}
}
toku_poll_txn_progress_function(txn, FALSE, TRUE);
yield(toku_checkpoint_safe_client_lock, yield_v);
toku_poll_txn_progress_function(txn, FALSE, FALSE);
r = toku_brt_truncate(brt);
assert(r==0);
toku_checkpoint_safe_client_unlock();
done:
return r;
}
int
toku_commit_tablelock_on_empty_table (FILENUM filenum, TOKUTXN txn, YIELDF UU(yield), void* UU(yield_v), LSN UU(oplsn))
{
return do_nothing_with_filenum(txn, filenum);
}
int
toku_commit_load (BYTESTRING old_iname,
BYTESTRING UU(new_iname),
......
......@@ -104,7 +104,7 @@ toku_apply_txn (TOKUTXN txn, YIELDF yield, void*yieldv, LSN lsn,
r = func(txn, item, yield, yieldv, lsn);
if (r!=0) return r;
count++;
if (count%2 == 0) yield(NULL, yieldv);
if (count%2 == 0) yield(NULL, NULL, yieldv);
}
}
if (next_log.b == txn->spilled_rollback_head.b) {
......@@ -206,6 +206,11 @@ static int note_brt_used_in_txns_parent(OMTVALUE brtv, u_int32_t UU(index), void
//Pass magic "no rollback needed" flag to parent.
brt->h->txnid_that_created_or_locked_when_empty = toku_txn_get_txnid(parent);
}
if (r==0 &&
brt->h->txnid_that_suppressed_recovery_logs == toku_txn_get_txnid(child)) {
//Pass magic "no recovery needed" flag to parent.
brt->h->txnid_that_suppressed_recovery_logs = toku_txn_get_txnid(parent);
}
return r;
}
......@@ -279,6 +284,12 @@ int toku_rollback_commit(TOKUTXN txn, YIELDF yield, void*yieldv, LSN lsn) {
r = toku_omt_iterate(txn->open_brts, note_brt_used_in_txns_parent, txn);
assert(r==0);
// Merge the list of headers that must be checkpointed before commit
while (!toku_list_empty(&txn->checkpoint_before_commit)) {
struct toku_list *list = toku_list_pop(&txn->checkpoint_before_commit);
toku_list_push(&txn->parent->checkpoint_before_commit, list);
}
//If this transaction needs an fsync (if it commits)
//save that in the parent. Since the commit really happens in the root txn.
txn->parent->force_fsync_on_commit |= txn->force_fsync_on_commit;
......@@ -293,6 +304,11 @@ int toku_rollback_commit(TOKUTXN txn, YIELDF yield, void*yieldv, LSN lsn) {
int toku_rollback_abort(TOKUTXN txn, YIELDF yield, void*yieldv, LSN lsn) {
int r;
//Empty the list
while (!toku_list_empty(&txn->checkpoint_before_commit)) {
toku_list_pop(&txn->checkpoint_before_commit);
}
r = toku_apply_txn(txn, yield, yieldv, lsn, toku_abort_rollback_item);
assert(r==0);
return r;
......@@ -557,7 +573,10 @@ static int remove_txn (OMTVALUE brtv, u_int32_t UU(idx), void *txnv) {
r = toku_omt_delete_at(brt->txns, index);
assert(r==0);
if (txn->txnid64==brt->h->txnid_that_created_or_locked_when_empty) {
brt->h->txnid_that_created_or_locked_when_empty = 0;
brt->h->txnid_that_created_or_locked_when_empty = TXNID_NONE;
}
if (txn->txnid64==brt->h->txnid_that_suppressed_recovery_logs) {
brt->h->txnid_that_suppressed_recovery_logs = TXNID_NONE;
}
if (!toku_brt_zombie_needed(brt) && brt->was_closed) {
//Close immediately.
......
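On commit, a child transaction splices its pending checkpoint-before-commit headers onto its parent's list; on abort, the list is simply drained. A standalone sketch of that splice-or-drop behavior using a minimal singly linked list (the real code uses the doubly linked intrusive toku_list):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct node { char name[16]; struct node *next; };

struct txn { struct node *pending; /* headers needing a checkpoint before commit */ };

static void push(struct txn *t, const char *name) {
    struct node *n = malloc(sizeof(*n));
    assert(n);
    snprintf(n->name, sizeof(n->name), "%s", name);
    n->next = t->pending;
    t->pending = n;
}

/* Commit of a child: hand every pending entry to the parent. */
static void commit_child(struct txn *child, struct txn *parent) {
    while (child->pending) {
        struct node *n = child->pending;
        child->pending = n->next;
        n->next = parent->pending;
        parent->pending = n;
    }
}

/* Abort: nothing will be committed, so just drop the entries. */
static void abort_txn(struct txn *t) {
    while (t->pending) {
        struct node *n = t->pending;
        t->pending = n->next;
        free(n);
    }
}

int main(void) {
    struct txn parent = { NULL }, child = { NULL };
    push(&child, "db_a");
    push(&child, "db_b");
    commit_child(&child, &parent);            /* parent now owns both headers */
    for (struct node *n = parent.pending; n; n = n->next)
        printf("parent must checkpoint %s before commit\n", n->name);
    abort_txn(&parent);                        /* cleanup for the sketch */
    return 0;
}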
......@@ -147,7 +147,13 @@ static inline int rwlock_readers(RWLOCK rwlock) {
return rwlock->reader;
}
// returns: the number of writers
// returns: the number of readers who are waiting for the lock
static inline int rwlock_blocked_readers(RWLOCK rwlock) {
return rwlock->want_read;
}
// returns: the number of writers who are waiting for the lock
static inline int rwlock_blocked_writers(RWLOCK rwlock) {
return rwlock->want_write;
......
......@@ -5,6 +5,7 @@
#include "includes.h"
#include "txn.h"
#include "checkpoint.h"
int toku_txn_begin_txn (TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TOKULOGGER logger) {
return toku_txn_begin_with_xid(parent_tokutxn, tokutxn, logger, 0);
......@@ -70,6 +71,7 @@ int toku_txn_begin_with_xid (TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TOKULOGGE
result->rollentry_raw_count = 0;
result->force_fsync_on_commit = FALSE;
result->recovered_from_checkpoint = FALSE;
toku_list_init(&result->checkpoint_before_commit);
*tokutxn = result;
return 0;
......@@ -112,16 +114,52 @@ toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info) {
// Doesn't close the txn, just performs the commit operations.
int toku_txn_commit_txn(TOKUTXN txn, int nosync, YIELDF yield, void *yieldv,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra,
void (*release_locks)(void*), void(*reacquire_locks)(void*), void *locks_thunk) {
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra) {
return toku_txn_commit_with_lsn(txn, nosync, yield, yieldv, ZERO_LSN,
poll, poll_extra,
release_locks, reacquire_locks, locks_thunk);
poll, poll_extra);
}
struct xcommit_info {
int r;
TOKUTXN txn;
int do_fsync;
};
//Called during a yield (ydb lock NOT held).
static void
local_checkpoints_and_log_xcommit(void *thunk) {
struct xcommit_info *info = thunk;
TOKUTXN txn = info->txn;
if (!txn->parent && !toku_list_empty(&txn->checkpoint_before_commit)) {
//Do local checkpoints that must happen BEFORE logging xcommit
uint32_t num_cachefiles = 0;
uint32_t list_size = 16;
CACHEFILE *cachefiles= NULL;
XMALLOC_N(list_size, cachefiles);
while (!toku_list_empty(&txn->checkpoint_before_commit)) {
struct toku_list *list = toku_list_pop(&txn->checkpoint_before_commit);
struct brt_header *h = toku_list_struct(list,
struct brt_header,
checkpoint_before_commit_link);
cachefiles[num_cachefiles++] = h->cf;
if (num_cachefiles == list_size) {
list_size *= 2;
XREALLOC_N(list_size, cachefiles);
}
}
assert(num_cachefiles);
CACHETABLE ct = toku_cachefile_get_cachetable(cachefiles[0]);
int r = toku_cachetable_local_checkpoint_for_commit(ct, txn, num_cachefiles, cachefiles);
assert(r==0);
}
info->r = toku_log_xcommit(txn->logger, (LSN*)0, info->do_fsync, txn->txnid64); // exits holding neither of the tokulogger locks.
}
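local_checkpoints_and_log_xcommit gathers the affected cachefiles into a heap array that starts at 16 entries and doubles whenever it fills (XMALLOC_N/XREALLOC_N). The same growth pattern in isolation, with plain malloc/realloc and a hypothetical element type:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    uint32_t num = 0;
    uint32_t cap = 16;                       /* same starting size as the diff */
    int *items = malloc(cap * sizeof(*items));
    assert(items);

    for (int i = 0; i < 100; i++) {          /* pretend these are cachefiles */
        items[num++] = i;
        if (num == cap) {                    /* grow before the next append */
            cap *= 2;
            int *tmp = realloc(items, cap * sizeof(*items));
            assert(tmp);
            items = tmp;
        }
    }
    printf("stored %u items, capacity %u\n", num, cap);
    free(items);
    return 0;
}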
int toku_txn_commit_with_lsn(TOKUTXN txn, int nosync, YIELDF yield, void *yieldv, LSN oplsn,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra,
void (*release_locks)(void*), void(*reacquire_locks)(void*), void *locks_thunk) {
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra) {
int r;
// panic handled in log_commit
......@@ -131,9 +169,15 @@ int toku_txn_commit_with_lsn(TOKUTXN txn, int nosync, YIELDF yield, void *yieldv
txn->progress_poll_fun = poll;
txn->progress_poll_fun_extra = poll_extra;
if (release_locks) release_locks(locks_thunk);
r = toku_log_xcommit(txn->logger, (LSN*)0, do_fsync, txn->txnid64); // exits holding neither of the tokulogger locks.
if (reacquire_locks) reacquire_locks(locks_thunk);
{
struct xcommit_info info = {
.r = 0,
.txn = txn,
.do_fsync = do_fsync
};
yield(local_checkpoints_and_log_xcommit, &info, yieldv);
r = info.r;
}
if (r!=0)
return r;
r = toku_rollback_commit(txn, yield, yieldv, oplsn);
......@@ -152,6 +196,7 @@ int toku_txn_abort_with_lsn(TOKUTXN txn, YIELDF yield, void *yieldv, LSN oplsn,
// Must undo everything. Must undo it all in reverse order.
// Build the reverse list
//printf("%s:%d abort\n", __FILE__, __LINE__);
txn->progress_poll_fun = poll;
txn->progress_poll_fun_extra = poll_extra;
int r=0;
......
......@@ -10,11 +10,9 @@ int toku_txn_begin_with_xid (TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TOKULOGGE
int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info);
int toku_txn_commit_txn (TOKUTXN txn, int nosync, YIELDF yield, void *yieldv,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra,
void (*release_locks)(void*), void(*reacquire_locks)(void*), void *locks_thunk);
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
int toku_txn_commit_with_lsn(TOKUTXN txn, int nosync, YIELDF yield, void *yieldv, LSN oplsn,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra,
void (*release_locks)(void*), void(*reacquire_locks)(void*), void *locks_thunk);
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
int toku_txn_abort_txn(TOKUTXN txn, YIELDF yield, void *yieldv,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
......
......@@ -71,14 +71,15 @@ static void free_loader_resources(DB_LOADER *loader)
if (loader->i->ekeys) toku_free(loader->i->ekeys);
if (loader->i->evals) toku_free(loader->i->evals);
for (int i=0; i<loader->i->N; i++) {
if (loader->i->inames_in_env[i]) toku_free(loader->i->inames_in_env[i]);
}
if (loader->i->err_key.data) toku_free(loader->i->err_key.data);
if (loader->i->err_val.data) toku_free(loader->i->err_val.data);
if (loader->i->inames_in_env) toku_free(loader->i->inames_in_env);
if (loader->i->inames_in_env) {
for (int i=0; i<loader->i->N; i++) {
if (loader->i->inames_in_env[i]) toku_free(loader->i->inames_in_env[i]);
}
toku_free(loader->i->inames_in_env);
}
if (loader->i->temp_file_template) toku_free(loader->i->temp_file_template);
if (loader->i->brt_loader) toku_free(loader->i->brt_loader);
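The cleanup fix above only walks the per-iname array after confirming it was allocated, so an early failure in loader setup no longer dereferences a NULL inames_in_env. The same pattern in isolation, with a hypothetical helper:

#include <stdlib.h>

/* Free an optionally-allocated array of optionally-allocated strings.
   Safe to call when names is NULL (e.g. setup failed before allocation). */
static void free_names(char **names, int n) {
    if (!names) return;
    for (int i = 0; i < n; i++) {
        free(names[i]);      /* free(NULL) is a no-op */
    }
    free(names);
}

int main(void) {
    free_names(NULL, 4);     /* early-failure path: nothing to do */
    char **names = calloc(2, sizeof(*names));
    if (names) {
        names[0] = malloc(8);
        free_names(names, 2);
    }
    return 0;
}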
......@@ -162,7 +163,7 @@ int toku_loader_create_loader(DB_ENV *env,
int r;
// lock tables and check empty
for(int i=0;i<N;i++) {
r = toku_db_pre_acquire_table_lock(dbs[i], txn);
r = toku_db_pre_acquire_table_lock(dbs[i], txn, TRUE);
if ( r!=0 ) {
free_loader(loader);
return -1;
......@@ -200,9 +201,10 @@ int toku_loader_create_loader(DB_ENV *env,
LSN load_lsn;
r = locked_ydb_load_inames (env, txn, N, dbs, new_inames_in_env, &load_lsn);
if ( r!=0 ) {
toku_free(new_inames_in_env);
toku_free(descriptors);
free_loader(loader);
return -1;
return r;
}
toku_brt_loader_open(&loader->i->brt_loader,
loader->i->env->i->cachetable,
......
......@@ -66,6 +66,7 @@
static DB_ENV *env = NULL;
static DB_TXN *txn_parent = NULL;
static DB_TXN *txn_child = NULL;
static DB_TXN *txn_hold_dname_lock = NULL;
static DB *db;
static char *dname = DICT_0;
static DBT key;
......@@ -108,6 +109,37 @@ end_env(void) {
env = NULL;
}
static void
start_txn_prevent_dname_lock(void) {
assert(env!=NULL);
assert(txn_hold_dname_lock==NULL);
int r;
r=env->txn_begin(env, 0, &txn_hold_dname_lock, 0);
CKERR(r);
DB *db2;
r = db_create(&db2, env, 0);
CKERR(r);
r=db2->open(db2, txn_hold_dname_lock, dname, 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO);
CKERR(r);
r = db2->close(db2, 0);
}
static void nopoll(TOKU_TXN_PROGRESS UU(progress), void *UU(extra)) {
assert(FALSE);
}
static void
commit_txn_prevent_dname_lock(void) {
assert(env!=NULL);
assert(txn_hold_dname_lock!=NULL);
int r;
r = txn_hold_dname_lock->commit_with_progress(txn_hold_dname_lock, 0, nopoll, NULL);
CKERR(r);
txn_hold_dname_lock = NULL;
}
static void
start_txn(void) {
assert(env!=NULL);
......@@ -301,43 +333,29 @@ progress_test_1(int n, int commit) {
end_env();
}
struct progress_stall_expect {
int num_calls;
BOOL has_been_stalled;
};
static void
abort_txn_stall_checkpoint(void) {
//We have disabled the norollback log fallback optimization.
//Checkpoint will not stall
assert(env!=NULL);
assert(txn_parent);
assert(!txn_child);
static void stall_poll(TOKU_TXN_PROGRESS progress, void *extra) {
struct progress_stall_expect *info = extra;
info->num_calls++;
assert(info->num_calls <= 2);
assert(progress->is_commit == FALSE);
if (!info->has_been_stalled) {
assert(info->num_calls==1);
assert(progress->stalled_on_checkpoint);
info->has_been_stalled = TRUE;
}
else {
assert(info->num_calls==2);
assert(!progress->stalled_on_checkpoint);
}
int r;
r=txn_parent->abort_with_progress(txn_parent, nopoll, NULL);
CKERR(r);
txn_parent = NULL;
}
static void
abort_txn_stall_checkpoint(void) {
abort_txn_nostall_checkpoint(void) {
assert(env!=NULL);
assert(txn_parent);
assert(!txn_child);
struct progress_stall_expect extra = {
.num_calls = 0,
.has_been_stalled = FALSE
};
int r;
r=txn_parent->abort_with_progress(txn_parent, stall_poll, &extra);
r=txn_parent->abort_with_progress(txn_parent, nopoll, NULL);
CKERR(r);
assert(extra.num_calls == 2);
txn_parent = NULL;
}
......@@ -358,12 +376,25 @@ progress_test_2(void) {
start_env();
open_db();
start_txn();
start_txn_prevent_dname_lock();
lock();
commit_txn_prevent_dname_lock();
abort_txn_stall_checkpoint();
close_db();
end_env();
}
static void
progress_test_3(void) {
start_env();
open_db();
start_txn();
lock();
abort_txn_nostall_checkpoint();
close_db();
end_env();
}
int
test_main (int argc, char * const argv[])
{
......@@ -373,5 +404,6 @@ test_main (int argc, char * const argv[])
progress_test_1(4, commit);
}
progress_test_2();
progress_test_3();
return 0;
}
......@@ -41,7 +41,7 @@ do_x1_shutdown (void) {
DB_LOADER *loader;
DB *dbs[1] = {db};
uint32_t db_flags[1] = {DB_NOOVERWRITE};
uint32_t dbt_flags[1];
uint32_t dbt_flags[1] = {0};
uint32_t loader_flags = 0;
r = env->create_loader(env, tid, &loader, NULL, 1, dbs, db_flags, dbt_flags, loader_flags);
......
......@@ -197,6 +197,6 @@ struct __toku_dbc_internal {
struct simple_dbt *skey,*sval;
};
int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn);
int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn, BOOL just_lock);
#endif
......@@ -1830,18 +1830,9 @@ static int toku_txn_release_locks(DB_TXN* txn) {
// Yield the lock so someone else can work, and then reacquire the lock.
// Useful while processing commit or rollback logs, to allow others to access the system.
static void ydb_yield (voidfp f, void *UU(v)) {
static void ydb_yield (voidfp f, void *fv, void *UU(v)) {
toku_ydb_unlock();
if (f) f();
toku_ydb_lock();
}
static void release_ydb_lock_callback (void *ignore __attribute__((__unused__))) {
//printf("%8.6fs Thread %ld release\n", get_tdiff(), pthread_self());
toku_ydb_unlock();
}
static void reacquire_ydb_lock_callback (void *ignore __attribute__((__unused__))) {
//printf("%8.6fs Thread %ld reacquire\n", get_tdiff(), pthread_self());
if (f) f(fv);
toku_ydb_lock();
}
......@@ -1886,8 +1877,7 @@ static int toku_txn_commit(DB_TXN * txn, u_int32_t flags,
// Calls ydb_yield(NULL) occasionally
//r = toku_logger_commit(db_txn_struct_i(txn)->tokutxn, nosync, ydb_yield, NULL);
r = toku_txn_commit_txn(db_txn_struct_i(txn)->tokutxn, nosync, ydb_yield, NULL,
poll, poll_extra,
release_ydb_lock_callback, reacquire_ydb_lock_callback, NULL);
poll, poll_extra);
if (r!=0 && !toku_env_is_panicked(txn->mgrp)) {
txn->mgrp->i->is_panicked = r;
......@@ -4729,7 +4719,7 @@ toku_env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbna
if (r==0) {
DB* zombie = env_get_zombie_db_with_dname(env, dname);
if (zombie)
r = toku_db_pre_acquire_table_lock(zombie, child);
r = toku_db_pre_acquire_table_lock(zombie, child, TRUE);
if (r!=0)
toku_ydb_do_error(env, r, "Cannot remove dictionary.\n");
}
......@@ -4846,7 +4836,7 @@ toku_env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbnam
if (r==0) {
zombie = env_get_zombie_db_with_dname(env, dname);
if (zombie)
r = toku_db_pre_acquire_table_lock(zombie, child);
r = toku_db_pre_acquire_table_lock(zombie, child, TRUE);
if (r!=0)
toku_ydb_do_error(env, r, "Cannot rename dictionary.\n");
}
......@@ -5041,7 +5031,7 @@ static int toku_db_pre_acquire_read_lock(DB *db, DB_TXN *txn, const DBT *key_lef
//static int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn) {
// needed by loader.c
int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn) {
int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn, BOOL just_lock) {
HANDLE_PANICKED_DB(db);
if (!db->i->lt || !txn) return EINVAL;
......@@ -5053,8 +5043,40 @@ int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn) {
r = toku_lt_acquire_range_write_lock(db->i->lt, db, id_anc,
toku_lt_neg_infinity, toku_lt_neg_infinity,
toku_lt_infinity, toku_lt_infinity);
if (r==0) {
r = toku_brt_note_table_lock(db->i->brt, db_txn_struct_i(txn)->tokutxn, FALSE); // tell the BRT layer that the table is locked (so that it can reduce the amount of rollback data).
if (r==0 && !just_lock &&
!toku_brt_is_recovery_logging_suppressed(db->i->brt) &&
toku_brt_is_empty(db->i->brt) &&
db_is_nodup(db) //TODO: Remove this check once we kill dupsort support.
) {
//Try to suppress both rollback and recovery logs
DB_LOADER *loader;
DB *dbs[1] = {db};
uint32_t db_flags[1] = {DB_NOOVERWRITE};
uint32_t dbt_flags[1] = {0};
uint32_t loader_flags = 0;
DB_ENV *env = db->dbenv;
toku_ydb_unlock(); //Cannot hold ydb lock when creating loader
int r_loader;
r_loader = env->create_loader(env, txn, &loader, NULL, 1, dbs, db_flags, dbt_flags, loader_flags);
if (r_loader==0) {
int r2;
r2 = loader->set_error_callback(loader, NULL, NULL);
assert(r2==0);
r2 = loader->set_poll_function(loader, NULL, NULL);
assert(r2==0);
// close the loader
r2 = loader->close(loader);
assert(r2==0);
toku_brt_suppress_recovery_logs(db->i->brt, db_txn_struct_i(txn)->tokutxn);
}
else if (r_loader != DB_LOCK_NOTGRANTED) {
//Lock not granted is not an error.
//It just means we cannot use the loader optimization.
assert(r==0);
r = r_loader;
}
toku_ydb_lock(); //Reacquire ydb lock.
}
return r;
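This is the headline optimization: creating a bulk loader on an empty, table-locked dictionary and closing it without loading any rows redirects the dictionary to a fresh file, after which both rollback and recovery logging for it can be suppressed. Below is a hedged, usage-level sketch modeled on the calls visible in this hunk (env->create_loader, set_error_callback, set_poll_function, close); environment, transaction, and DB setup are elided and the flag values are assumptions, so treat it as a compile-level illustration rather than working code.

/* Sketch only: env/txn/db setup elided; flag values are assumptions.
   DB_LOADER and env->create_loader are the TokuDB loader extension
   used elsewhere in this diff. No main() is provided. */
#include <stdint.h>
#include <db.h>

static int suppress_logging_via_empty_loader(DB_ENV *env, DB_TXN *txn, DB *db) {
    DB_LOADER *loader;
    DB *dbs[1] = { db };
    uint32_t db_flags[1]  = { DB_NOOVERWRITE };   /* same flags as the diff */
    uint32_t dbt_flags[1] = { 0 };
    uint32_t loader_flags = 0;

    /* Creating a loader on an empty, table-locked dictionary redirects it
       to a brand-new file... */
    int r = env->create_loader(env, txn, &loader, NULL, 1, dbs,
                               db_flags, dbt_flags, loader_flags);
    if (r != 0) return r;   /* e.g. lock not granted: just skip the optimization */

    /* ...and closing it with no rows loaded finishes the redirect. */
    r = loader->set_error_callback(loader, NULL, NULL);
    if (r == 0) r = loader->set_poll_function(loader, NULL, NULL);
    if (r == 0) r = loader->close(loader);
    return r;
}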
......@@ -5174,7 +5196,7 @@ static int locked_db_pre_acquire_read_lock(DB *db, DB_TXN *txn, const DBT *key_l
static int locked_db_pre_acquire_table_lock(DB *db, DB_TXN *txn) {
toku_ydb_lock();
int r = toku_db_pre_acquire_table_lock(db, txn);
int r = toku_db_pre_acquire_table_lock(db, txn, FALSE);
toku_ydb_unlock();
return r;
}
......@@ -5202,7 +5224,7 @@ static int toku_db_truncate(DB *db, DB_TXN *txn, u_int32_t *row_count, u_int32_t
// acquire a table lock
if (txn) {
r = toku_db_pre_acquire_table_lock(db, txn);
r = toku_db_pre_acquire_table_lock(db, txn, TRUE);
if (r != 0)
return r;
}
......@@ -5601,10 +5623,10 @@ ydb_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[N], char * new_iname
char hint[strlen(dname) + 1];
create_iname_hint(dname, hint);
char * new_iname = create_iname(env, xid, hint, i); // allocates memory for iname_in_env
new_inames_in_env[i] = new_iname;
toku_fill_dbt(&iname_dbt, new_iname, strlen(new_iname) + 1); // iname_in_env goes in directory
rval = toku_db_put(env->i->directory, child, &dname_dbt, &iname_dbt, DB_YESOVERWRITE); // DB_YESOVERWRITE necessary
if (rval) break;
new_inames_in_env[i] = new_iname;
}
// Generate load log entries.
......
......@@ -360,17 +360,23 @@ else
endif
.PHONY: tags
tags: cscope.out TAGS
tags: $(TOKUROOT)cscope.out $(TOKUROOT)TAGS
ifneq ($(TOKUROOT),./)
TAGS: $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
rm -f `find $(TOKUROOT) -type f -name TAGS` #Delete all other tag files
etags $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
endif
$(TOKUROOT)TAGS: $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
cd $(TOKUROOT) && etags */*.[ch] */*/*.[ch] */*/*/*.[ch]
$(TOKUROOT)cscope.files: $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
cd $(TOKUROOT) && (echo */*.[ch] */*/*.[ch] */*/*/*.[ch] | tr " " "\n") > $(notdir $@)
cscope.files: $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
(echo $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch] | tr " " "\n") > $@
$(TOKUROOT)cscope.in.out $(TOKUROOT)cscope.po.out: $(TOKUROOT)cscope.out;
cscope.out: cscope.files $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
$(CSCOPE) -qb
$(TOKUROOT)cscope.out: $(TOKUROOT)cscope.files $(TOKUROOT)*/*.[ch] $(TOKUROOT)*/*/*.[ch] $(TOKUROOT)*/*/*/*.[ch]
cd $(TOKUROOT) && $(CSCOPE) -qb
.PHONY: clean clean-default %.dir.clean
clean: clean-default
......