Commit 40c1bc80 authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

refs #6057, merge backing out of read txn work to main

git-svn-id: file:///svn/toku/tokudb@53466 c7de825b-a66e-492c-adef-691d508d4ae1
parent fc962b10
......@@ -249,7 +249,6 @@ static void print_defines (void) {
#endif
dodefine_from_track(txn_flags, DB_INHERIT_ISOLATION);
dodefine_from_track(txn_flags, DB_SERIALIZABLE);
dodefine_from_track(txn_flags, DB_TXN_READ_ONLY);
}
/* TOKUDB specific error codes*/
......
......@@ -3933,10 +3933,7 @@ static int
does_txn_read_entry(TXNID id, TOKUTXN context) {
int rval;
TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(context);
if (oldest_live_in_snapshot == TXNID_NONE && id < context->snapshot_txnid64) {
rval = TOKUDB_ACCEPT;
}
else if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) {
if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) {
rval = TOKUDB_ACCEPT;
}
else if (id > context->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*context->live_root_txn_list, id)) {
......
......@@ -412,7 +412,6 @@ generate_log_writer (void) {
fprintf(cf, " //txn can be NULL during tests\n");
fprintf(cf, " //never null when not checkpoint.\n");
fprintf(cf, " if (txn && !txn->begin_was_logged) {\n");
fprintf(cf, " invariant(!txn_declared_read_only(txn));\n");
fprintf(cf, " toku_maybe_log_begin_txn_for_write_operation(txn);\n");
fprintf(cf, " }\n");
break;
......@@ -420,7 +419,6 @@ generate_log_writer (void) {
case ASSERT_BEGIN_WAS_LOGGED: {
fprintf(cf, " //txn can be NULL during tests\n");
fprintf(cf, " invariant(!txn || txn->begin_was_logged);\n");
fprintf(cf, " invariant(!txn || !txn_declared_read_only(txn));\n");
break;
}
case IGNORE_LOG_BEGIN: break;
......
......@@ -480,16 +480,7 @@ recover_transaction(TOKUTXN *txnp, TXNID_PAIR xid, TXNID_PAIR parentxid, TOKULOG
toku_txnid2txn(logger, xid, &txn);
assert(txn==NULL);
}
r = toku_txn_begin_with_xid(
parent,
&txn,
logger,
xid,
TXN_SNAPSHOT_NONE,
NULL,
true, // for_recovery
false // read_only
);
r = toku_txn_begin_with_xid(parent, &txn, logger, xid, TXN_SNAPSHOT_NONE, NULL, true);
assert(r == 0);
// We only know about it because it was logged. Restore the log bit.
// Logging is 'off' but it will still set the bit.
......
......@@ -97,7 +97,6 @@ test_writer_priority_thread (void *arg) {
static void
test_writer_priority (void) {
struct rw_event rw_event, *rwe = &rw_event;
ZERO_STRUCT(rw_event);
int r;
rw_event_init(rwe);
......@@ -153,7 +152,6 @@ test_single_writer_thread (void *arg) {
static void
test_single_writer (void) {
struct rw_event rw_event, *rwe = &rw_event;
ZERO_STRUCT(rw_event);
int r;
rw_event_init(rwe);
......
......@@ -32,7 +32,7 @@ static void test_it (int N) {
r = toku_logger_open_rollback(logger, ct, true); CKERR(r);
TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
......@@ -44,12 +44,12 @@ static void test_it (int N) {
unsigned int rands[N];
for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
char key[100],val[300];
DBT k, v;
rands[i] = random();
......@@ -67,12 +67,12 @@ static void test_it (int N) {
if (verbose) printf("i=%d\n", i);
}
for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
char key[100];
DBT k;
snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
......@@ -92,7 +92,7 @@ static void test_it (int N) {
if (verbose) printf("d=%d\n", i);
}
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
......
......@@ -50,7 +50,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0);
TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
FT_HANDLE brt = NULL;
......@@ -62,7 +62,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn);
txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
// insert keys 0, 1, 2, .. (n-1)
......@@ -120,7 +120,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
assert(error == 0);
TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
FT_HANDLE brt = NULL;
......@@ -132,7 +132,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn);
txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
// del keys 0, 2, 4, ...
......@@ -145,7 +145,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
}
TOKUTXN cursortxn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &cursortxn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &cursortxn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
LE_CURSOR cursor = NULL;
......
......@@ -54,7 +54,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0);
TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
FT_HANDLE brt = NULL;
......@@ -66,7 +66,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn);
txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
// insert keys 0, 1, 2, .. (n-1)
......
......@@ -51,7 +51,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0);
TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
FT_HANDLE brt = NULL;
......@@ -63,7 +63,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn);
txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE);
assert(error == 0);
// insert keys 0, 1, 2, .. (n-1)
......
......@@ -47,8 +47,7 @@ void txn_child_manager_unit_test::run_child_txn_test() {
NULL,
&root_txn,
logger,
TXN_SNAPSHOT_CHILD,
false
TXN_SNAPSHOT_CHILD
);
CKERR(r);
// test starting a child txn
......@@ -58,8 +57,7 @@ void txn_child_manager_unit_test::run_child_txn_test() {
root_txn,
&child_txn,
logger,
TXN_SNAPSHOT_CHILD,
false
TXN_SNAPSHOT_CHILD
);
CKERR(r);
......@@ -91,8 +89,7 @@ void txn_child_manager_unit_test::run_test() {
NULL,
&root_txn,
logger,
TXN_SNAPSHOT_ROOT,
false
TXN_SNAPSHOT_ROOT
);
CKERR(r);
txn_child_manager* cm = root_txn->child_manager;
......@@ -111,8 +108,7 @@ void txn_child_manager_unit_test::run_test() {
root_txn,
&child_txn,
logger,
TXN_SNAPSHOT_ROOT,
false
TXN_SNAPSHOT_ROOT
);
CKERR(r);
assert(child_txn->child_manager == cm);
......@@ -132,8 +128,7 @@ void txn_child_manager_unit_test::run_test() {
child_txn,
&grandchild_txn,
logger,
TXN_SNAPSHOT_ROOT,
false
TXN_SNAPSHOT_ROOT
);
CKERR(r);
assert(grandchild_txn->child_manager == cm);
......@@ -158,8 +153,7 @@ void txn_child_manager_unit_test::run_test() {
child_txn,
&grandchild_txn,
logger,
TXN_SNAPSHOT_ROOT,
false
TXN_SNAPSHOT_ROOT
);
CKERR(r);
assert(grandchild_txn->child_manager == cm);
......@@ -183,8 +177,7 @@ void txn_child_manager_unit_test::run_test() {
xid,
TXN_SNAPSHOT_NONE,
NULL,
true, // for recovery
false // read_only
true // for recovery
);
assert(recovery_txn->child_manager == cm);
......
......@@ -15,7 +15,7 @@
static void do_txn(TOKULOGGER logger, bool readonly) {
int r;
TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE);
CKERR(r);
if (!readonly) {
......@@ -37,7 +37,7 @@ static void test_xid_lsn_independent(int N) {
FT_HANDLE brt;
TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE);
CKERR(r);
r = toku_open_ft_handle("ftfile", 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun);
......@@ -47,7 +47,7 @@ static void test_xid_lsn_independent(int N) {
CKERR(r);
toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE);
CKERR(r);
TXNID xid_first = txn->txnid.parent_id64;
unsigned int rands[N];
......@@ -62,7 +62,7 @@ static void test_xid_lsn_independent(int N) {
}
{
TOKUTXN txn2;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE, false);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE);
CKERR(r);
// Verify the txnid has gone up only by one (even though many log entries were done)
invariant(txn2->txnid.parent_id64 == xid_first + 1);
......@@ -77,7 +77,7 @@ static void test_xid_lsn_independent(int N) {
//TODO(yoni) #5067 will break this portion of the test. (End ids are also assigned, so it would increase by 4 instead of 2.)
// Verify the txnid has gone up only by two (even though many log entries were done)
TOKUTXN txn3;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE, false);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE);
CKERR(r);
invariant(txn3->txnid.parent_id64 == xid_first + 2);
r = toku_txn_commit_txn(txn3, false, NULL, NULL);
......@@ -173,7 +173,7 @@ static void test_xid_lsn_independent_parents(int N) {
ZERO_ARRAY(txns_hack);
for (int i = 0; i < N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE, false);
r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE);
CKERR(r);
if (i < num_non_cascade) {
......
......@@ -77,49 +77,19 @@ toku_txn_get_root_id(TOKUTXN txn)
return txn->txnid.parent_id64;
}
bool txn_declared_read_only(TOKUTXN txn) {
return (txn->txnid.parent_id64 == TXNID_NONE);
}
int
toku_txn_begin_txn (
DB_TXN *container_db_txn,
TOKUTXN parent_tokutxn,
TOKUTXN *tokutxn,
TOKULOGGER logger,
TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
TXN_SNAPSHOT_TYPE snapshot_type
)
{
int r = toku_txn_begin_with_xid(
parent_tokutxn,
tokutxn,
logger,
TXNID_PAIR_NONE,
snapshot_type,
container_db_txn,
false, // for_recovery
read_only
);
int r = toku_txn_begin_with_xid(parent_tokutxn, tokutxn, logger, TXNID_PAIR_NONE, snapshot_type, container_db_txn, false);
return r;
}
static void
txn_create_xids(TOKUTXN txn, TOKUTXN parent) {
XIDS xids;
XIDS parent_xids;
if (parent == NULL) {
parent_xids = xids_get_root_xids();
} else {
parent_xids = parent->xids;
}
xids_create_unknown_child(parent_xids, &xids);
TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64;
xids_finalize_with_child(xids, finalized_xid);
txn->xids = xids;
}
int
toku_txn_begin_with_xid (
TOKUTXN parent,
......@@ -128,22 +98,24 @@ toku_txn_begin_with_xid (
TXNID_PAIR xid,
TXN_SNAPSHOT_TYPE snapshot_type,
DB_TXN *container_db_txn,
bool for_recovery,
bool read_only
bool for_recovery
)
{
int r = 0;
TOKUTXN txn;
// check for case where we are trying to
// create too many nested transactions
if (!read_only && parent && !xids_can_create_child(parent->xids)) {
r = EINVAL;
goto exit;
TOKUTXN txn;
XIDS xids;
// Do as much (safe) work as possible before serializing on the txn_manager lock.
XIDS parent_xids;
if (parent == NULL) {
parent_xids = xids_get_root_xids();
} else {
parent_xids = parent->xids;
}
if (read_only && parent) {
invariant(txn_declared_read_only(parent));
r = xids_create_unknown_child(parent_xids, &xids);
if (r != 0) {
return r;
}
toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, for_recovery);
toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, xids, for_recovery);
// txnid64, snapshot_txnid64
// will be set in here.
if (for_recovery) {
......@@ -167,8 +139,7 @@ toku_txn_begin_with_xid (
toku_txn_manager_start_txn(
txn,
logger->txn_manager,
snapshot_type,
read_only
snapshot_type
);
}
else {
......@@ -181,12 +152,10 @@ toku_txn_begin_with_xid (
);
}
}
if (!read_only) {
// this call will set txn->xids
txn_create_xids(txn, parent);
}
TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64;
xids_finalize_with_child(txn->xids, finalized_xid);
*txnp = txn;
exit:
return r;
}
......@@ -211,6 +180,7 @@ void toku_txn_create_txn (
TOKULOGGER logger,
TXN_SNAPSHOT_TYPE snapshot_type,
DB_TXN *container_db_txn,
XIDS xids,
bool for_recovery
)
{
......@@ -246,7 +216,7 @@ static txn_child_manager tcm;
.child_manager = NULL,
.container_db_txn = container_db_txn,
.live_root_txn_list = nullptr,
.xids = NULL,
.xids = xids,
.oldest_referenced_xid = TXNID_NONE,
.begin_was_logged = false,
.do_fsync = false,
......@@ -570,9 +540,7 @@ void toku_txn_complete_txn(TOKUTXN txn) {
void toku_txn_destroy_txn(TOKUTXN txn) {
txn->open_fts.destroy();
if (txn->xids) {
xids_destroy(&txn->xids);
}
xids_destroy(&txn->xids);
toku_mutex_destroy(&txn->txn_lock);
toku_mutex_destroy(&txn->state_lock);
toku_cond_destroy(&txn->state_cond);
......@@ -589,14 +557,10 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn) {
}
TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn) {
invariant(txn->live_root_txn_list->size()>0);
TXNID xid;
if (txn->live_root_txn_list->size()>0) {
int r = txn->live_root_txn_list->fetch(0, &xid);
assert_zero(r);
}
else {
xid = TXNID_NONE;
}
int r = txn->live_root_txn_list->fetch(0, &xid);
assert_zero(r);
return xid;
}
......
......@@ -29,15 +29,13 @@ void toku_txn_lock(TOKUTXN txn);
void toku_txn_unlock(TOKUTXN txn);
uint64_t toku_txn_get_root_id(TOKUTXN txn);
bool txn_declared_read_only(TOKUTXN txn);
int toku_txn_begin_txn (
DB_TXN *container_db_txn,
TOKUTXN parent_tokutxn,
TOKUTXN *tokutxn,
TOKULOGGER logger,
TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
TXN_SNAPSHOT_TYPE snapshot_type
);
DB_TXN * toku_txn_get_container_db_txn (TOKUTXN tokutxn);
......@@ -51,12 +49,11 @@ int toku_txn_begin_with_xid (
TXNID_PAIR xid,
TXN_SNAPSHOT_TYPE snapshot_type,
DB_TXN *container_db_txn,
bool for_recovery,
bool read_only
bool for_recovery
);
// Allocate and initialize a txn
void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, bool for_checkpoint);
void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, XIDS xids, bool for_checkpoint);
void toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid);
int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info);
......
......@@ -270,20 +270,13 @@ static TXNID get_oldest_referenced_xid_unlocked(TXN_MANAGER txn_manager) {
// one live transaction
invariant_zero(r);
struct referenced_xid_tuple* tuple;
if (txn_manager->referenced_xids.size() > 0) {
struct referenced_xid_tuple* tuple;
r = txn_manager->referenced_xids.fetch(0, &tuple);
if (r == 0 && tuple->begin_id < oldest_referenced_xid) {
oldest_referenced_xid = tuple->begin_id;
}
}
if (txn_manager->snapshot_txnids.size() > 0) {
TXNID id;
r = txn_manager->snapshot_txnids.fetch(0, &id);
if (r == 0 && id < oldest_referenced_xid) {
oldest_referenced_xid = id;
}
}
return oldest_referenced_xid;
}
......@@ -499,18 +492,13 @@ void toku_txn_manager_start_txn_for_recovery(
void toku_txn_manager_start_txn(
TOKUTXN txn,
TXN_MANAGER txn_manager,
TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
TXN_SNAPSHOT_TYPE snapshot_type
)
{
int r;
TXNID xid = TXNID_NONE;
// if we are running in recovery, we don't need to make snapshots
bool needs_snapshot = txn_needs_snapshot(snapshot_type, NULL);
if (read_only && !needs_snapshot) {
inherit_snapshot_from_parent(txn);
goto exit;
}
// perform a malloc outside of the txn_manager lock
// will be used in txn_manager_create_snapshot_unlocked below
......@@ -540,16 +528,14 @@ void toku_txn_manager_start_txn(
// is taken into account when the transaction is closed.
// add ancestor information, and maintain global live root txn list
if (!read_only) {
xid = ++txn_manager->last_xid;
toku_txn_update_xids_in_txn(txn, xid);
uint32_t idx = txn_manager->live_root_txns.size();
r = txn_manager->live_root_txns.insert_at(txn, idx);
invariant_zero(r);
r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx);
invariant_zero(r);
txn->oldest_referenced_xid = get_oldest_referenced_xid_unlocked(txn_manager);
}
xid = ++txn_manager->last_xid;
toku_txn_update_xids_in_txn(txn, xid);
uint32_t idx = txn_manager->live_root_txns.size();
r = txn_manager->live_root_txns.insert_at(txn, idx);
invariant_zero(r);
r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx);
invariant_zero(r);
txn->oldest_referenced_xid = get_oldest_referenced_xid_unlocked(txn_manager);
if (needs_snapshot) {
txn_manager_create_snapshot_unlocked(
......@@ -562,8 +548,6 @@ void toku_txn_manager_start_txn(
verify_snapshot_system(txn_manager);
}
txn_manager_unlock(txn_manager);
exit:
return;
}
TXNID
......@@ -594,9 +578,6 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
int r;
invariant(txn->parent == NULL);
bool is_snapshot = txn_needs_snapshot(txn->snapshot_type, NULL);
if (!is_snapshot && txn_declared_read_only(txn)) {
goto exit;
}
txn_manager_lock(txn_manager);
if (garbage_collection_debug) {
......@@ -612,39 +593,37 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
);
}
if (!txn_declared_read_only(txn)) {
uint32_t idx;
//Remove txn from list of live root txns
TOKUTXN txnagain;
r = txn_manager->live_root_txns.find_zero<TOKUTXN, find_xid>(txn, &txnagain, &idx);
invariant_zero(r);
invariant(txn==txnagain);
uint32_t idx;
//Remove txn from list of live root txns
TOKUTXN txnagain;
r = txn_manager->live_root_txns.find_zero<TOKUTXN, find_xid>(txn, &txnagain, &idx);
invariant_zero(r);
invariant(txn==txnagain);
r = txn_manager->live_root_txns.delete_at(idx);
invariant_zero(r);
r = txn_manager->live_root_ids.delete_at(idx);
invariant_zero(r);
r = txn_manager->live_root_txns.delete_at(idx);
invariant_zero(r);
r = txn_manager->live_root_ids.delete_at(idx);
invariant_zero(r);
if (!toku_txn_is_read_only(txn) || garbage_collection_debug) {
if (!is_snapshot) {
//
// If it's a snapshot, we already calculated index_in_snapshot_txnids.
// Otherwise, calculate it now.
//
r = txn_manager->snapshot_txnids.find_zero<TXNID, toku_find_xid_by_xid>(txn->txnid.parent_id64, nullptr, &index_in_snapshot_txnids);
invariant(r == DB_NOTFOUND);
}
uint32_t num_references = txn_manager->snapshot_txnids.size() - index_in_snapshot_txnids;
if (num_references > 0) {
// This transaction exists in a live list of another transaction.
struct referenced_xid_tuple tuple = {
.begin_id = txn->txnid.parent_id64,
.end_id = ++txn_manager->last_xid,
.references = num_references
};
r = txn_manager->referenced_xids.insert<TXNID, find_tuple_by_xid>(tuple, txn->txnid.parent_id64, nullptr);
lazy_assert_zero(r);
}
if (!toku_txn_is_read_only(txn) || garbage_collection_debug) {
if (!is_snapshot) {
//
// If it's a snapshot, we already calculated index_in_snapshot_txnids.
// Otherwise, calculate it now.
//
r = txn_manager->snapshot_txnids.find_zero<TXNID, toku_find_xid_by_xid>(txn->txnid.parent_id64, nullptr, &index_in_snapshot_txnids);
invariant(r == DB_NOTFOUND);
}
uint32_t num_references = txn_manager->snapshot_txnids.size() - index_in_snapshot_txnids;
if (num_references > 0) {
// This transaction exists in a live list of another transaction.
struct referenced_xid_tuple tuple = {
.begin_id = txn->txnid.parent_id64,
.end_id = ++txn_manager->last_xid,
.references = num_references
};
r = txn_manager->referenced_xids.insert<TXNID, find_tuple_by_xid>(tuple, txn->txnid.parent_id64, nullptr);
lazy_assert_zero(r);
}
}
......@@ -659,8 +638,6 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
txn->live_root_txn_list->destroy();
toku_free(txn->live_root_txn_list);
}
exit:
return;
}
void toku_txn_manager_clone_state_for_gc(
......
......@@ -58,8 +58,7 @@ void toku_txn_manager_handle_snapshot_destroy_for_child_txn(
void toku_txn_manager_start_txn(
TOKUTXN txn,
TXN_MANAGER txn_manager,
TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
TXN_SNAPSHOT_TYPE snapshot_type
);
void toku_txn_manager_start_txn_for_recovery(
......
......@@ -62,12 +62,6 @@ xids_get_root_xids(void) {
return rval;
}
bool
xids_can_create_child(XIDS xids) {
invariant(xids->num_xids < MAX_TRANSACTION_RECORDS);
return (xids->num_xids + 1) != MAX_TRANSACTION_RECORDS;
}
int
xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) {
......@@ -76,15 +70,17 @@ xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) {
int rval;
invariant(parent_xids);
uint32_t num_child_xids = parent_xids->num_xids + 1;
// assumes that caller has verified that num_child_xids will
// be less than MAX_TRANSACTIN_RECORDS
invariant(num_child_xids < MAX_TRANSACTION_RECORDS);
size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]);
XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size));
// Clone everything (parent does not have the newest xid).
memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0]));
*xids_p = xids;
rval = 0;
invariant(num_child_xids > 0);
invariant(num_child_xids <= MAX_TRANSACTION_RECORDS);
if (num_child_xids == MAX_TRANSACTION_RECORDS) rval = EINVAL;
else {
size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]);
XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size));
// Clone everything (parent does not have the newest xid).
memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0]));
*xids_p = xids;
rval = 0;
}
return rval;
}
......@@ -103,13 +99,11 @@ int
xids_create_child(XIDS parent_xids, // xids list for parent transaction
XIDS * xids_p, // xids list created
TXNID this_xid) { // xid of this transaction (new innermost)
bool can_create_child = xids_can_create_child(parent_xids);
if (!can_create_child) {
return EINVAL;
int rval = xids_create_unknown_child(parent_xids, xids_p);
if (rval == 0) {
xids_finalize_with_child(*xids_p, this_xid);
}
xids_create_unknown_child(parent_xids, xids_p);
xids_finalize_with_child(*xids_p, this_xid);
return 0;
return rval;
}
void
......
......@@ -28,8 +28,6 @@
//Retrieve an XIDS representing the root transaction.
XIDS xids_get_root_xids(void);
bool xids_can_create_child(XIDS xids);
void xids_cpy(XIDS target, XIDS source);
//Creates an XIDS representing this transaction.
......
......@@ -161,7 +161,6 @@ toku_indexer_create_indexer(DB_ENV *env,
{
int rval;
DB_INDEXER *indexer = 0; // set later when created
HANDLE_READ_ONLY_TXN(txn);
*indexerp = NULL;
......
......@@ -169,7 +169,6 @@ toku_loader_create_loader(DB_ENV *env,
uint32_t loader_flags,
bool check_empty) {
int rval;
HANDLE_READ_ONLY_TXN(txn);
*blp = NULL; // set later when created
......
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO)
if(BUILD_TESTING OR BUILD_SRC_TESTS)
function(add_ydb_test bin)
add_toku_test(ydb ${bin} ${ARGN})
endfunction(add_ydb_test)
function(add_ydb_test_aux name bin)
add_toku_test_aux(ydb ${name} ${bin} ${ARGN})
endfunction(add_ydb_test_aux)
function(add_ydb_helgrind_test bin)
add_helgrind_test(ydb helgrind_${bin} $<TARGET_FILE:${bin}> ${ARGN})
endfunction(add_ydb_helgrind_test)
function(add_ydb_drd_test_aux name bin)
add_drd_test(ydb ${name} $<TARGET_FILE:${bin}> ${ARGN})
endfunction(add_ydb_drd_test_aux)
function(add_ydb_drd_test bin)
add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN})
endfunction(add_ydb_drd_test)
file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc)
file(GLOB tdb_dontrun_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" bdb-simple-deadlock*.cc)
string(REGEX REPLACE "\\.cc(;|$)" "\\1" tdb_dontrun_tests "${tdb_dontrun_srcs}")
file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
list(REMOVE_ITEM srcs ${transparent_upgrade_srcs})
set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc)
file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc)
file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc)
file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc)
string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}")
set(tdb_srcs ${srcs})
list(REMOVE_ITEM tdb_srcs ${tdb_dontrun_srcs})
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}")
list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs})
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}")
if(BDB_FOUND)
set(bdb_dontrun_srcs
backwards_10_each_le_and_msg
blackhole
blocking-prelock-range
blocking-set-range-reverse-0
blocking-table-lock
bug1381
bug627
cachetable-race
checkpoint_1
checkpoint_callback
checkpoint_stress
cursor-isolation
cursor-set-del-rmw
cursor-set-range-rmw
db-put-simple-deadlock
del-simple
del-multiple
del-multiple-huge-primary-row
del-multiple-srcdb
directory_lock
diskfull
dump-env
env-put-multiple
env_startup
execute-updates
filesize
helgrind1
helgrind2
helgrind3
hotindexer-bw
hotindexer-error-callback
hotindexer-insert-committed-optimized
hotindexer-insert-committed
hotindexer-insert-provisional
hotindexer-lock-test
hotindexer-multiclient
hotindexer-nested-insert-committed
hotindexer-put-abort
hotindexer-put-commit
hotindexer-put-multiple
hotindexer-simple-abort
hotindexer-simple-abort-put
hotindexer-undo-do-test
hotindexer-with-queries
hot-optimize-table-tests
insert-dup-prelock
isolation
isolation-read-committed
keyrange
keyrange-merge
last-verify-time
loader-cleanup-test
loader-create-abort
loader-create-close
loader-dup-test
loader-no-puts
loader-reference-test
loader-stress-del
loader-stress-test
loader-tpch-load
lock-pressure
manyfiles
maxsize-for-loader
multiprocess
mvcc-create-table
mvcc-many-committed
mvcc-read-committed
perf_checkpoint_var
perf_child_txn
perf_cursor_nop
perf_iibench
perf_insert
perf_insert_multiple
perf_malloc_free
perf_nop
perf_ptquery
perf_ptquery2
perf_rangequery
perf_read_txn
perf_read_txn_single_thread
perf_read_write
perf_txn_single_thread
perf_xmalloc_free
prelock-read-read
prelock-read-write
prelock-write-read
prelock-write-write
print_engine_status
powerfail
preload-db
preload-db-nested
progress
put-multiple
queries_with_deletes
recover-2483
recover-3113
recover-5146
recover-compare-db
recover-compare-db-descriptor
recover-del-multiple
recover-del-multiple-abort
recover-del-multiple-srcdb-fdelete-all
recover-delboth-after-checkpoint
recover-delboth-checkpoint
recover-descriptor
recover-descriptor2
recover-descriptor3
recover-descriptor4
recover-descriptor5
recover-descriptor6
recover-descriptor7
recover-descriptor8
recover-descriptor9
recover-descriptor10
recover-descriptor11
recover-descriptor12
recover-fclose-in-checkpoint
recover-fcreate-basementnodesize
recover-flt1
recover-flt2
recover-flt3
recover-flt4
recover-flt5
recover-flt6
recover-flt7
recover-flt8
recover-flt9
recover-flt10
recover-hotindexer-simple-abort-put
recover-loader-test
recover-lsn-filter-multiple
recover-put-multiple
recover-put-multiple-abort
recover-put-multiple-fdelete-all
recover-put-multiple-fdelete-some
recover-put-multiple-srcdb-fdelete-all
recover-split-checkpoint
recover-tablelock
recover-test-logsuppress
recover-test-logsuppress-put
recover-test_stress1
recover-test_stress2
recover-test_stress3
recover-test_stress_openclose
recover-upgrade-db-descriptor-multihandle
recover-upgrade-db-descriptor
recover-update-multiple
recover-update-multiple-abort
recover-update_aborts
recover-update_aborts_before_checkpoint
recover-update_aborts_before_close
recover-update_changes_values
recover-update_changes_values_before_checkpoint
recover-update_changes_values_before_close
recover-update_broadcast_aborts
recover-update_broadcast_aborts2
recover-update_broadcast_aborts3
recover-update_broadcast_aborts_before_checkpoint
recover-update_broadcast_aborts_before_close
recover-update_broadcast_changes_values
recover-update_broadcast_changes_values2
recover-update_broadcast_changes_values3
recover-update_broadcast_changes_values_before_checkpoint
recover-update_broadcast_changes_values_before_close
recover-update_changes_values_before_close
recovery_fileops_stress
recovery_fileops_unit
recovery_stress
redirect
replace-into-write-lock
root_fifo_2
root_fifo_32
root_fifo_41
seqinsert
shutdown-3344
stat64
stat64-create-modify-times
stat64_flatten
stat64-null-txn
stat64-root-changes
stress-gc
stress-gc2
test-xa-prepare
test1324
test1572
test3219
test3522
test3522b
test3529
test_3645
test_3529_insert_2
test_3529_table_lock
test_3755
test_4015
test_4368
test_4657
test_5015
test_5469
test-5138
test938c
test_abort1
test_abort4
test_abort5
test_blobs_leaf_split
test_bulk_fetch
test_compression_methods
test_cmp_descriptor
test_cursor_with_read_txn
test_db_change_pagesize
test_db_change_xxx
test_cursor_delete_2119
test_db_descriptor
test_db_descriptor_named_db
test_db_txn_locks_read_uncommitted
test_get_max_row_size
test_large_update_broadcast_small_cachetable
test_locktree_close
test_logflush
test_multiple_checkpoints_block_commit
test_query
test_read_txn_invalid_ops
test_redirect_func
test_row_size_supported
test_simple_read_txn
test_stress0
test_stress1
test_stress2
test_stress3
test_stress4
test_stress5
test_stress6
test_stress7
test_stress_openclose
test_stress_with_verify
test_stress_hot_indexing
test_transactional_descriptor
test_trans_desc_during_chkpt
test_trans_desc_during_chkpt2
test_trans_desc_during_chkpt3
test_trans_desc_during_chkpt4
test_txn_abort6
test_txn_abort8
test_txn_abort9
test_txn_close_open_commit
test_txn_commit8
test_txn_nested1
test_txn_nested2
test_txn_nested3
test_txn_nested4
test_txn_nested5
test_update_abort_works
test_update_calls_back
test_update_can_delete_elements
test_update_changes_values
test_update_nonexistent_keys
test_update_previously_deleted
test_update_stress
test_update_txn_snapshot_works_concurrently
test_update_txn_snapshot_works_correctly_with_deletes
test_update_broadcast_abort_works
test_update_broadcast_calls_back
test_update_broadcast_can_delete_elements
test_update_broadcast_changes_values
test_update_broadcast_previously_deleted
test_update_broadcast_stress
test_update_broadcast_update_fun_has_choices
test_update_broadcast_with_empty_table
test_update_broadcast_indexer
test_update_broadcast_loader
test_update_broadcast_nested_updates
test_update_nested_updates
test_update_with_empty_table
test_updates_single_key
txn-ignore
transactional_fileops
update-multiple-data-diagonal
update-multiple-key0
update-multiple-nochange
update-multiple-with-indexer
update
upgrade_simple
upgrade-test-1
upgrade-test-2
upgrade-test-3
upgrade-test-4
upgrade-test-5
upgrade-test-6
upgrade-test-7
zombie_db
)
# Derive the BDB test list: start from all test sources, strip the .cc
# suffix, drop the tests known not to build/run against BerkeleyDB, then
# append the .bdb suffix to form test (and binary) names.
set(bdb_srcs ${srcs})
string(REGEX REPLACE "\\.cc(;|$)" "\\1" bdb_testbases "${bdb_srcs}")
list(REMOVE_ITEM bdb_testbases ${bdb_dontrun_srcs})
string(REGEX REPLACE "(.)(;|$)" "\\1.bdb\\2" bdb_tests "${bdb_testbases}")
set(bdb_bins ${bdb_tests})
endif()
# Tests whose non-zero exit status is the expected outcome; they are
# marked WILL_FAIL further below (after the loader tests are expanded).
set(tdb_tests_that_should_fail
test_db_no_env.tdb
test_log8.recover
test_log9.recover
test_log10.recover
recover-missing-dbfile.abortrecover
recover-missing-dbfile-2.abortrecover
loader-tpch-load.loader
)
## #5138 only reproduces when using the static library.
# Pull test-5138 out of the shared-library test list and rebuild it against
# the static libtokudb/libtokuportability instead.
list(REMOVE_ITEM tdb_bins test-5138.tdb)
add_executable(test-5138.tdb test-5138)
target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
set_property(TARGET test-5138.tdb APPEND PROPERTY
COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1")
add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden)
add_ydb_test(test-5138.tdb)
# Build one .tdb executable per TokuDB test source.
foreach(bin ${tdb_bins})
get_filename_component(base ${bin} NAME_WE)
add_executable(${base}.tdb ${base})
# Some of the symbols in util may not be exported properly by
# libtokudb.so.
# We link the test with util directly so that the test code itself can use
# some of those things (i.e. kibbutz in the threaded tests).
target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY})
set_property(TARGET ${base}.tdb APPEND PROPERTY
COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1")
add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden)
endforeach(bin)
# Build the BerkeleyDB variants of the same tests when BDB is available.
if(BDB_FOUND)
foreach(bin ${bdb_bins})
get_filename_component(base ${bin} NAME_WE)
add_executable(${base}.bdb ${base})
set_property(TARGET ${base}.bdb APPEND PROPERTY
COMPILE_DEFINITIONS "USE_BDB;IS_TDB=0;TOKU_ALLOW_DEPRECATED")
set_target_properties(${base}.bdb PROPERTIES
INCLUDE_DIRECTORIES "${BDB_INCLUDE_DIR};${CMAKE_CURRENT_BINARY_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../portability;${CMAKE_CURRENT_SOURCE_DIR}/../..")
target_link_libraries(${base}.bdb ${LIBTOKUPORTABILITY} ${BDB_LIBRARIES})
add_space_separated_property(TARGET ${base}.bdb COMPILE_FLAGS -fvisibility=hidden)
endforeach(bin)
endif()
# These two tests intentionally exercise write paths that are otherwise
# deprecated at compile time.
foreach(bin loader-cleanup-test.tdb diskfull.tdb)
set_property(TARGET ${bin} APPEND PROPERTY
COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES)
endforeach(bin)
# Exclude each named test from the auto-generated tdb test list; such
# tests are registered by hand below with custom commands or arguments.
macro(declare_custom_tests)
if(NOT "${ARGN}" STREQUAL "")
list(REMOVE_ITEM tdb_tests ${ARGN})
endif()
endmacro(declare_custom_tests)
# test1426 compares a TokuDB dump against a BDB dump, so it needs both
# binaries plus the dump tools; drive it through a shell wrapper.
declare_custom_tests(test1426.tdb)
if(BDB_FOUND)
macro(declare_custom_bdb_tests)
foreach(test ${ARGN})
list(REMOVE_ITEM bdb_tests ${test})
endforeach(test)
endmacro(declare_custom_bdb_tests)
declare_custom_bdb_tests(test1426.bdb)
configure_file(run_test1426.sh . COPYONLY)
add_test(NAME ydb/test1426.tdb
COMMAND run_test1426.sh
$<TARGET_FILE:test1426.tdb> $<TARGET_FILE:test1426.bdb>
"test1426.tdb.ctest-data" "test1426.bdb.ctest-data"
$<TARGET_FILE:tokudb_dump> "${BDB_INCLUDE_DIR}/../bin/db_dump")
add_dependencies(test1426.tdb tokudb_dump)
endif()
# The recover_srcs would otherwise also appear as plain .tdb tests;
# exclude those duplicates from the generic list.
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}")
declare_custom_tests(${recover_would_be_tdb_tests})
# powerfail requires pulling the plug; keep a placeholder test only.
declare_custom_tests(powerfail.tdb)
add_test(ydb/powerfail.tdb echo must run powerfail by hand)
declare_custom_tests(checkpoint_stress.tdb)
configure_file(run_checkpoint_stress_test.sh . COPYONLY)
add_test(NAME ydb/checkpoint_stress.tdb
COMMAND run_checkpoint_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb)
# recover_stress reuses the checkpoint_stress binary on purpose: the
# wrapper script kills it and then exercises recovery.
configure_file(run_recover_stress_test.sh . COPYONLY)
add_test(NAME ydb/recover_stress.tdb
COMMAND run_recover_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb)
declare_custom_tests(diskfull.tdb)
configure_file(run_diskfull_test.sh . COPYONLY)
add_test(NAME ydb/diskfull.tdb
COMMAND run_diskfull_test.sh $<TARGET_FILE:diskfull.tdb> 134)
setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb)
# recovery_fileops_unit is run over a cartesian product of its -O/-A..-I
# option flags.  The per-variable value sets (gset, hset, dset, eset, iset)
# are pruned so only meaningful combinations are generated; the conditions
# mirror constraints inside the test itself.
declare_custom_tests(recovery_fileops_unit.tdb)
configure_file(run_recovery_fileops_unit.sh . COPYONLY)
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir")
foreach(ov c d r)
if (ov STREQUAL c)
set(gset 0)
set(hset 0)
else ()
set(gset 0 1 2 3 4 5)
set(hset 0 1)
endif ()
foreach(av 0 1)
foreach(bv 0 1)
if (bv)
set(dset 0 1)
set(eset 0 1)
else ()
set(dset 0)
set(eset 0)
endif ()
foreach(cv 0 1 2)
foreach(dv ${dset})
foreach(ev ${eset})
foreach(fv 0 1)
foreach(gv ${gset})
foreach(hv ${hset})
if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv)))
set(iset 0 1)
else ()
set(iset 0)
endif ()
foreach(iv ${iset})
# Each combination gets its own test name, env dir and error file,
# all keyed by the concatenated option values.
set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors")
add_test(NAME ${testname}
COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137
-O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv}
)
setup_toku_test_properties(${testname} ${envdir})
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}")
endforeach(iv)
endforeach(hv)
endforeach(gv)
endforeach(fv)
endforeach(ev)
endforeach(dv)
endforeach(cv)
endforeach(bv)
endforeach(av)
endforeach(ov)
# helgrind1 intentionally races and must FAIL under helgrind; skip it on
# platforms/configs where valgrind output is unreliable.
if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR
(CMAKE_CXX_COMPILER_ID STREQUAL Intel AND
CMAKE_BUILD_TYPE STREQUAL Release)
OR USE_GCOV))
declare_custom_tests(helgrind1.tdb)
add_test(NAME ydb/helgrind_helgrind1.tdb
COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $<TARGET_FILE:helgrind1.tdb>)
setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb)
set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE)
endif()
declare_custom_tests(helgrind2.tdb)
declare_custom_tests(helgrind3.tdb)
add_ydb_helgrind_test(helgrind2.tdb)
add_ydb_helgrind_test(helgrind3.tdb)
# Run the group-commit counter test plain, under helgrind, and under drd.
declare_custom_tests(test_groupcommit_count.tdb)
add_ydb_test(test_groupcommit_count.tdb -n 1)
add_ydb_helgrind_test(test_groupcommit_count.tdb -n 1)
add_ydb_drd_test(test_groupcommit_count.tdb -n 1)
add_ydb_drd_test(test_4015.tdb)
# We link the locktree so that stress test 0 can call some
# functions (ie: lock escalation) directly.
target_link_libraries(test_stress0.tdb locktree)
# Set up default stress tests and drd tests. Exclude hot_index.
# Each stress test runs at three sizes under drd: tiny (inline), mid and
# large (both with a separate prepare step that creates the env first).
foreach(src ${stress_test_srcs})
if(NOT ${src} MATCHES hot_index)
get_filename_component(base ${src} NAME_WE)
set(test ${base}.tdb)
if (${src} MATCHES test_stress0)
add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600)
else ()
add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600)
endif ()
add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000)
set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600)
add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000)
setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test})
add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400)
set_tests_properties(ydb/drd_mid_${test} PROPERTIES
DEPENDS ydb/drd_mid_${test}/prepare
REQUIRED_FILES "drd_mid_${test}.ctest-data"
TIMEOUT 15000
)
add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000)
setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test})
add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800)
set_tests_properties(ydb/drd_large_${test} PROPERTIES
DEPENDS ydb/drd_large_${test}/prepare
REQUIRED_FILES "drd_large_${test}.ctest-data"
TIMEOUT 30000
)
endif()
endforeach(src)
# Set up upgrade tests. Exclude test_stress_openclose
# For each old release we keep saved environments (pristine and stressed);
# the test copies one in and stresses it to verify in-place upgrade.
foreach(src ${stress_test_srcs})
if (NOT ${src} MATCHES test_stress_openclose)
get_filename_component(base ${src} NAME_WE)
set(test ${base}.tdb)
foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3)
set(versiondir ${TOKU_SVNROOT}/tokudb.data/old-stress-test-envs/${oldver})
if (NOT EXISTS "${versiondir}/")
message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.")
endif ()
foreach(p_or_s pristine stressed)
if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed))
foreach(size 2000)
set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir")
# NOTE(review): ${upgrade} is not set anywhere in this file, so
# envdirbase presumably starts with a literal "_" — confirm intended.
set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}")
set(envdir "${envdirbase}.ctest-data")
set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size})
# remove -> copy -> run, chained via DEPENDS.
add_test(NAME ${testnamebase}/remove
COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}")
add_test(NAME ${testnamebase}/copy
COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}")
set_tests_properties(${testnamebase}/copy PROPERTIES
DEPENDS ${testnamebase}/remove
REQUIRED_FILES "${oldenvdir}")
add_test(NAME ${testnamebase}
COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200)
setup_toku_test_properties(${testnamebase} "${envdirbase}")
set_tests_properties(${testnamebase} PROPERTIES
DEPENDS ${testnamebase}/copy
REQUIRED_FILES "${envdir}"
TIMEOUT 10800)
endforeach(size)
endif ()
endforeach(p_or_s)
endforeach(oldver)
endif ()
endforeach(src)
# dump-env needs the saved test_5902 environment; remove/copy/run chain
# like the upgrade tests above.
if (NOT EXISTS "${TOKU_SVNROOT}/tokudb.data/test_5902/")
message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKU_SVNROOT}/tokudb.data/test_5902 first or dump-env.tdb may fail.")
endif ()
declare_custom_tests(dump-env.tdb)
add_test(NAME ydb/dump-env.tdb/remove
COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data")
add_test(NAME ydb/dump-env.tdb/copy
COMMAND ${CMAKE_COMMAND} -E copy_directory "${TOKU_SVNROOT}/tokudb.data/test_5902" "dump-env.tdb.ctest-data")
set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES
DEPENDS ydb/dump-env.tdb/remove
REQUIRED_FILES "${TOKU_SVNROOT}/tokudb.data/test_5902")
add_ydb_test(dump-env.tdb)
set_tests_properties(ydb/dump-env.tdb PROPERTIES
DEPENDS ydb/dump-env.tdb/copy
REQUIRED_FILES "dump-env.tdb.ctest-data")
## for some reason this rule doesn't run with the makefile and it crashes with this rule, so I'm disabling this special case
#declare_custom_tests(test_thread_stack.tdb)
#add_custom_command(OUTPUT run_test_thread_stack.sh
# COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}"
# MAIN_DEPENDENCY run_test_thread_stack.sh
# VERBATIM)
#add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh)
#add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb")
# root_fifo_41 is parameterized over 1..100 rows, with and without -populate.
declare_custom_tests(root_fifo_41.tdb)
foreach(num RANGE 1 100)
add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate)
add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num})
endforeach(num)
add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000)
# test_abort4 sweeps abort point (-l -1..19) crossed with commit flag (-c).
declare_custom_tests(test_abort4.tdb)
foreach(num RANGE -1 19)
add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num})
add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num})
endforeach(num)
set(old_loader_upgrade_data "${TOKU_SVNROOT}/tokudb.data/env_preload.4.2.0.emptydictionaries.cleanshutdown")
if (NOT EXISTS "${old_loader_upgrade_data}/")
message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.")
endif ()
# Register a loader upgrade test: wipe any stale env dir, copy in the saved
# 4.2.0 loader environment (${old_loader_upgrade_data}), then run ${bin}
# with -u (upgrade) plus any extra arguments.  The three steps are chained
# via DEPENDS so ctest orders them correctly.
function(add_loader_upgrade_test name bin)
set(_envdir "${name}.ctest-data")
set(_testbase "ydb/${name}")
add_test(NAME ${_testbase}/remove
COMMAND ${CMAKE_COMMAND} -E remove_directory "${_envdir}")
add_test(NAME ${_testbase}/copy
COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${_envdir}")
set_tests_properties(${_testbase}/copy PROPERTIES
DEPENDS ${_testbase}/remove
REQUIRED_FILES "${old_loader_upgrade_data}")
add_ydb_test_aux(${name} ${bin} -u ${ARGN})
set_tests_properties(${_testbase} PROPERTIES
DEPENDS ${_testbase}/copy
REQUIRED_FILES "${_envdir}")
endfunction(add_loader_upgrade_test)
# loader-stress-test variants: plain (0), with -p (1), small+serial (2),
# upgrade from 4.2.0 env (3), huge row count (4); *z variants add -z
# (compression).
list(REMOVE_ITEM loader_tests loader-stress-test.loader)
add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p)
add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s)
add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c)
add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z)
add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z)
add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z)
add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z)
add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28)
# loader-dup-test variants sweep duplicate handling (-d), serial (-s),
# row counts, and error injection (-E).
list(REMOVE_ITEM loader_tests loader-dup-test.loader)
add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb)
add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000)
add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000)
add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100)
add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000)
add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E)
add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z)
add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z)
add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z)
add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z)
add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z)
add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z)
## as part of #4503, we took out test 1 and 3
list(REMOVE_ITEM loader_tests loader-cleanup-test.loader)
add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800)
#add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p)
add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000)
#add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p)
add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z)
add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z)
# keyrange variants: estimate-only (--get 0) vs exact (--get 1), sequential
# vs random keys, and loader-built dictionaries.
declare_custom_tests(keyrange.tdb)
add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0)
add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1)
if (0)
add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1)
add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1)
else ()
message(WARNING "TODO(leif): re-enable keyrange tests, see #5666")
endif ()
add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1)
add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1)
declare_custom_tests(maxsize-for-loader.tdb)
add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c)
add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c)
add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c)
add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c)
# hotindexer undo-do: one data-driven test per .test file, each checked
# against its matching .result file by the bash driver.
declare_custom_tests(hotindexer-undo-do-test.tdb)
file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test")
file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result")
configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY)
foreach(result ${hotindexer_results})
configure_file(${result} ${result} COPYONLY)
endforeach(result)
foreach(test ${hotindexer_tests})
configure_file(${test} ${test} COPYONLY)
add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test})
setup_toku_test_properties(ydb/${test} ${test})
endforeach()
# Everything still left in the generic lists becomes a plain test.
foreach(test ${tdb_tests} ${bdb_tests})
add_ydb_test(${test})
endforeach(test)
# .recover tests: run the test, then replay the log with tdb-recover and
# compare dumps via the shell driver.
configure_file(run_recover_test.sh . COPYONLY)
foreach(recover_test ${recover_tests})
get_filename_component(base ${recover_test} NAME_WE)
add_test(NAME ydb/${recover_test}
COMMAND run_recover_test.sh $<TARGET_FILE:${base}.tdb> "${recover_test}.ctest-data" $<TARGET_FILE:tdb-recover> $<TARGET_FILE:tokudb_dump>)
setup_toku_test_properties(ydb/${recover_test} ${recover_test})
endforeach(recover_test)
# .abortrecover tests: the binary itself crashes and then recovers; the
# script orchestrates both phases.
configure_file(run_abortrecover_test.sh . COPYONLY)
foreach(abortrecover_test ${abortrecover_tests})
get_filename_component(base ${abortrecover_test} NAME_WE)
add_test(NAME ydb/${abortrecover_test}
COMMAND run_abortrecover_test.sh $<TARGET_FILE:${base}.tdb>)
setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test})
endforeach(abortrecover_test)
## alternate implementation, doesn't work because the abort phase crashes and we can't tell cmake that's expected
# foreach(abortrecover_test ${abortrecover_tests})
# get_filename_component(base ${abortrecover_test} NAME_WE)
# set(test ${base}.tdb)
# add_test(NAME ydb/${test}/abort
# COMMAND ${test} --test)
# setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test})
# set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE)
# add_test(NAME ydb/${test}/recover
# COMMAND ${test} --recover)
# setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test})
# set_tests_properties(ydb/${test}/recover PROPERTIES
# DEPENDS ydb/${test}/abort
# REQUIRED_FILES "${abortrecover_test}.ctest-data")
# endforeach(abortrecover_test)
# Each remaining loader test runs three ways: no flags, -p, and -z.  If the
# base test is in the should-fail list, replace it with all three variants.
foreach(loader_test ${loader_tests})
get_filename_component(base ${loader_test} NAME_WE)
add_ydb_test_aux(${base}.nop.loader ${base}.tdb)
add_ydb_test_aux(${base}.p.loader ${base}.tdb -p)
add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z)
# NOTE(review): MATCHES treats "${base}.loader" as a regex, so the "."
# matches any char — harmless here but worth knowing.
if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader")
list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader)
list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader)
endif()
endforeach(loader_test)
# Prefix every entry with "ydb/" (first entry explicitly, the rest via the
# ";"-separator rewrite) and mark them all WILL_FAIL.
set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}")
string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}")
set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE)
## give some tests, that time out normally, 1 hour to complete
set(long_tests
ydb/checkpoint_1.tdb
ydb/drd_test_groupcommit_count.tdb
ydb/env-put-multiple.tdb
ydb/filesize.tdb
ydb/loader-cleanup-test0.tdb
ydb/loader-cleanup-test0z.tdb
ydb/manyfiles.tdb
ydb/recover-loader-test.abortrecover
ydb/recovery_fileops_stress.tdb
ydb/root_fifo_1.tdb
ydb/root_fifo_2.tdb
ydb/root_fifo_31.tdb
ydb/root_fifo_32.tdb
ydb/shutdown-3344.tdb
ydb/stat64-create-modify-times.tdb
ydb/test1572.tdb
ydb/test_abort4_19_0.tdb
ydb/test_abort4_19_1.tdb
ydb/test_abort5.tdb
ydb/test_archive1.tdb
ydb/test_logmax.tdb
ydb/test_query.tdb
ydb/test_txn_abort5.tdb
ydb/test_txn_abort5a.tdb
ydb/test_txn_abort6.tdb
ydb/test_txn_nested2.tdb
ydb/test_txn_nested4.tdb
ydb/test_txn_nested5.tdb
ydb/test_update_broadcast_stress.tdb
)
if (BDB_FOUND)
list(APPEND long_tests
ydb/root_fifo_1.bdb
ydb/root_fifo_31.bdb
ydb/rowsize.bdb
ydb/test_log10.bdb
ydb/test_log7.bdb
ydb/test_logmax.bdb
)
endif (BDB_FOUND)
set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
## some take even longer, with valgrind
# 2-hour budget for valgrind-heavy tests.
set(extra_long_tests
ydb/drd_test_4015.tdb
ydb/hotindexer-with-queries.tdb
ydb/hot-optimize-table-tests.tdb
ydb/loader-cleanup-test2.tdb
ydb/loader-cleanup-test2z.tdb
ydb/loader-dup-test0.tdb
ydb/loader-stress-del.nop.loader
ydb/loader-stress-del.p.loader
ydb/loader-stress-del.comp.loader
ydb/test3039.tdb
ydb/test_update_stress.tdb
)
if (BDB_FOUND)
list(APPEND extra_long_tests
ydb/test_groupcommit_count.bdb
)
endif (BDB_FOUND)
set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
## these really take a long time with valgrind
# 4-hour budget for the slowest tests.
set(phenomenally_long_tests
ydb/checkpoint_stress.tdb
ydb/loader-stress-test4.tdb
ydb/loader-stress-test4z.tdb
ydb/recover_stress.tdb
ydb/test3529.tdb
)
if (BDB_FOUND)
list(APPEND phenomenally_long_tests
ydb/test1426.tdb
)
endif (BDB_FOUND)
set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
endif(BUILD_TESTING OR BUILD_SRC_TESTS)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO)
if(BUILD_TESTING OR BUILD_SRC_TESTS)
# Thin wrappers that namespace all tests in this directory under "ydb/".
function(add_ydb_test bin)
add_toku_test(ydb ${bin} ${ARGN})
endfunction(add_ydb_test)
# Like add_ydb_test, but the ctest name differs from the binary name.
function(add_ydb_test_aux name bin)
add_toku_test_aux(ydb ${name} ${bin} ${ARGN})
endfunction(add_ydb_test_aux)
# Run the binary under valgrind --tool=helgrind.
function(add_ydb_helgrind_test bin)
add_helgrind_test(ydb helgrind_${bin} $<TARGET_FILE:${bin}> ${ARGN})
endfunction(add_ydb_helgrind_test)
# Run the binary under valgrind --tool=drd, with an explicit test name.
function(add_ydb_drd_test_aux name bin)
add_drd_test(ydb ${name} $<TARGET_FILE:${bin}> ${ARGN})
endfunction(add_ydb_drd_test_aux)
function(add_ydb_drd_test bin)
add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN})
endfunction(add_ydb_drd_test)
# Collect test sources and partition them by kind; each kind gets a
# distinct ctest suffix (.recover/.abortrecover/.loader/.tdb) via the
# REGEX REPLACE rewrites below.
file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc)
file(GLOB tdb_dontrun_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" bdb-simple-deadlock*.cc)
string(REGEX REPLACE "\\.cc(;|$)" "\\1" tdb_dontrun_tests "${tdb_dontrun_srcs}")
file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
list(REMOVE_ITEM srcs ${transparent_upgrade_srcs})
set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc)
file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc)
file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc)
file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc)
string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}")
# All sources build as .tdb binaries, but abortrecover/loader sources are
# removed from the generic tdb_tests list (they get custom runners).
set(tdb_srcs ${srcs})
list(REMOVE_ITEM tdb_srcs ${tdb_dontrun_srcs})
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}")
list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs})
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}")
if(BDB_FOUND)
set(bdb_dontrun_srcs
backwards_10_each_le_and_msg
blackhole
blocking-prelock-range
blocking-set-range-reverse-0
blocking-table-lock
bug1381
bug627
cachetable-race
checkpoint_1
checkpoint_callback
checkpoint_stress
cursor-isolation
cursor-set-del-rmw
cursor-set-range-rmw
db-put-simple-deadlock
del-simple
del-multiple
del-multiple-huge-primary-row
del-multiple-srcdb
directory_lock
diskfull
dump-env
env-put-multiple
env_startup
execute-updates
filesize
helgrind1
helgrind2
helgrind3
hotindexer-bw
hotindexer-error-callback
hotindexer-insert-committed-optimized
hotindexer-insert-committed
hotindexer-insert-provisional
hotindexer-lock-test
hotindexer-multiclient
hotindexer-nested-insert-committed
hotindexer-put-abort
hotindexer-put-commit
hotindexer-put-multiple
hotindexer-simple-abort
hotindexer-simple-abort-put
hotindexer-undo-do-test
hotindexer-with-queries
hot-optimize-table-tests
insert-dup-prelock
isolation
isolation-read-committed
keyrange
keyrange-merge
last-verify-time
loader-cleanup-test
loader-create-abort
loader-create-close
loader-dup-test
loader-no-puts
loader-reference-test
loader-stress-del
loader-stress-test
loader-tpch-load
lock-pressure
manyfiles
maxsize-for-loader
multiprocess
mvcc-create-table
mvcc-many-committed
mvcc-read-committed
perf_checkpoint_var
perf_child_txn
perf_cursor_nop
perf_iibench
perf_insert
perf_insert_multiple
perf_malloc_free
perf_nop
perf_ptquery
perf_ptquery2
perf_rangequery
perf_read_write
perf_txn_single_thread
perf_xmalloc_free
prelock-read-read
prelock-read-write
prelock-write-read
prelock-write-write
print_engine_status
powerfail
preload-db
preload-db-nested
progress
put-multiple
queries_with_deletes
recover-2483
recover-3113
recover-5146
recover-compare-db
recover-compare-db-descriptor
recover-del-multiple
recover-del-multiple-abort
recover-del-multiple-srcdb-fdelete-all
recover-delboth-after-checkpoint
recover-delboth-checkpoint
recover-descriptor
recover-descriptor2
recover-descriptor3
recover-descriptor4
recover-descriptor5
recover-descriptor6
recover-descriptor7
recover-descriptor8
recover-descriptor9
recover-descriptor10
recover-descriptor11
recover-descriptor12
recover-fclose-in-checkpoint
recover-fcreate-basementnodesize
recover-flt1
recover-flt2
recover-flt3
recover-flt4
recover-flt5
recover-flt6
recover-flt7
recover-flt8
recover-flt9
recover-flt10
recover-hotindexer-simple-abort-put
recover-loader-test
recover-lsn-filter-multiple
recover-put-multiple
recover-put-multiple-abort
recover-put-multiple-fdelete-all
recover-put-multiple-fdelete-some
recover-put-multiple-srcdb-fdelete-all
recover-split-checkpoint
recover-tablelock
recover-test-logsuppress
recover-test-logsuppress-put
recover-test_stress1
recover-test_stress2
recover-test_stress3
recover-test_stress_openclose
recover-upgrade-db-descriptor-multihandle
recover-upgrade-db-descriptor
recover-update-multiple
recover-update-multiple-abort
recover-update_aborts
recover-update_aborts_before_checkpoint
recover-update_aborts_before_close
recover-update_changes_values
recover-update_changes_values_before_checkpoint
recover-update_changes_values_before_close
recover-update_broadcast_aborts
recover-update_broadcast_aborts2
recover-update_broadcast_aborts3
recover-update_broadcast_aborts_before_checkpoint
recover-update_broadcast_aborts_before_close
recover-update_broadcast_changes_values
recover-update_broadcast_changes_values2
recover-update_broadcast_changes_values3
recover-update_broadcast_changes_values_before_checkpoint
recover-update_broadcast_changes_values_before_close
recover-update_changes_values_before_close
recovery_fileops_stress
recovery_fileops_unit
recovery_stress
redirect
replace-into-write-lock
root_fifo_2
root_fifo_32
root_fifo_41
seqinsert
shutdown-3344
stat64
stat64-create-modify-times
stat64_flatten
stat64-null-txn
stat64-root-changes
stress-gc
stress-gc2
test-xa-prepare
test1324
test1572
test3219
test3522
test3522b
test3529
test_3645
test_3529_insert_2
test_3529_table_lock
test_3755
test_4015
test_4368
test_4657
test_5015
test_5469
test-5138
test938c
test_abort1
test_abort4
test_abort5
test_blobs_leaf_split
test_bulk_fetch
test_compression_methods
test_cmp_descriptor
test_db_change_pagesize
test_db_change_xxx
test_cursor_delete_2119
test_db_descriptor
test_db_descriptor_named_db
test_db_txn_locks_read_uncommitted
test_get_max_row_size
test_large_update_broadcast_small_cachetable
test_locktree_close
test_logflush
test_multiple_checkpoints_block_commit
test_query
test_redirect_func
test_row_size_supported
test_stress0
test_stress1
test_stress2
test_stress3
test_stress4
test_stress5
test_stress6
test_stress7
test_stress_openclose
test_stress_with_verify
test_stress_hot_indexing
test_transactional_descriptor
test_trans_desc_during_chkpt
test_trans_desc_during_chkpt2
test_trans_desc_during_chkpt3
test_trans_desc_during_chkpt4
test_txn_abort6
test_txn_abort8
test_txn_abort9
test_txn_close_open_commit
test_txn_commit8
test_txn_nested1
test_txn_nested2
test_txn_nested3
test_txn_nested4
test_txn_nested5
test_update_abort_works
test_update_calls_back
test_update_can_delete_elements
test_update_changes_values
test_update_nonexistent_keys
test_update_previously_deleted
test_update_stress
test_update_txn_snapshot_works_concurrently
test_update_txn_snapshot_works_correctly_with_deletes
test_update_broadcast_abort_works
test_update_broadcast_calls_back
test_update_broadcast_can_delete_elements
test_update_broadcast_changes_values
test_update_broadcast_previously_deleted
test_update_broadcast_stress
test_update_broadcast_update_fun_has_choices
test_update_broadcast_with_empty_table
test_update_broadcast_indexer
test_update_broadcast_loader
test_update_broadcast_nested_updates
test_update_nested_updates
test_update_with_empty_table
test_updates_single_key
txn-ignore
transactional_fileops
update-multiple-data-diagonal
update-multiple-key0
update-multiple-nochange
update-multiple-with-indexer
update
upgrade_simple
upgrade-test-1
upgrade-test-2
upgrade-test-3
upgrade-test-4
upgrade-test-5
upgrade-test-6
upgrade-test-7
zombie_db
)
set(bdb_srcs ${srcs})
string(REGEX REPLACE "\\.cc(;|$)" "\\1" bdb_testbases "${bdb_srcs}")
list(REMOVE_ITEM bdb_testbases ${bdb_dontrun_srcs})
string(REGEX REPLACE "(.)(;|$)" "\\1.bdb\\2" bdb_tests "${bdb_testbases}")
set(bdb_bins ${bdb_tests})
endif()
set(tdb_tests_that_should_fail
test_db_no_env.tdb
test_log8.recover
test_log9.recover
test_log10.recover
recover-missing-dbfile.abortrecover
recover-missing-dbfile-2.abortrecover
loader-tpch-load.loader
)
## #5138 only reproduces when using the static library.
list(REMOVE_ITEM tdb_bins test-5138.tdb)
add_executable(test-5138.tdb test-5138)
target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
set_property(TARGET test-5138.tdb APPEND PROPERTY
COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1")
add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden)
add_ydb_test(test-5138.tdb)
foreach(bin ${tdb_bins})
get_filename_component(base ${bin} NAME_WE)
add_executable(${base}.tdb ${base})
# Some of the symbols in util may not be exported properly by
# libtokudb.so.
# We link the test with util directly so that the test code itself can use
# some of those things (i.e. kibbutz in the threaded tests).
target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY})
set_property(TARGET ${base}.tdb APPEND PROPERTY
COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1")
add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden)
endforeach(bin)
# When Berkeley DB is available, also build one <base>.bdb executable per
# test so results can be compared against BDB behavior.
if(BDB_FOUND)
  foreach(bin ${bdb_bins})
    get_filename_component(base ${bin} NAME_WE)
    add_executable(${base}.bdb ${base})
    set_property(TARGET ${base}.bdb APPEND PROPERTY
      COMPILE_DEFINITIONS "USE_BDB;IS_TDB=0;TOKU_ALLOW_DEPRECATED")
    set_target_properties(${base}.bdb PROPERTIES
      INCLUDE_DIRECTORIES "${BDB_INCLUDE_DIR};${CMAKE_CURRENT_BINARY_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../portability;${CMAKE_CURRENT_SOURCE_DIR}/../..")
    target_link_libraries(${base}.bdb ${LIBTOKUPORTABILITY} ${BDB_LIBRARIES})
    add_space_separated_property(TARGET ${base}.bdb COMPILE_FLAGS -fvisibility=hidden)
  endforeach(bin)
endif()
# These two targets call write APIs that are deprecated elsewhere; suppress
# the deprecation machinery for them only.
foreach(bin loader-cleanup-test.tdb diskfull.tdb)
  set_property(TARGET ${bin} APPEND PROPERTY
    COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES)
endforeach(bin)
# Remove the named tests from the default tdb_tests list; callers then
# register them with hand-written add_test() invocations instead.
macro(declare_custom_tests)
  foreach(test ${ARGN})
    list(REMOVE_ITEM tdb_tests ${test})
  endforeach(test)
endmacro(declare_custom_tests)
declare_custom_tests(test1426.tdb)
if(BDB_FOUND)
macro(declare_custom_bdb_tests)
foreach(test ${ARGN})
list(REMOVE_ITEM bdb_tests ${test})
endforeach(test)
endmacro(declare_custom_bdb_tests)
declare_custom_bdb_tests(test1426.bdb)
configure_file(run_test1426.sh . COPYONLY)
add_test(NAME ydb/test1426.tdb
COMMAND run_test1426.sh
$<TARGET_FILE:test1426.tdb> $<TARGET_FILE:test1426.bdb>
"test1426.tdb.ctest-data" "test1426.bdb.ctest-data"
$<TARGET_FILE:tokudb_dump> "${BDB_INCLUDE_DIR}/../bin/db_dump")
add_dependencies(test1426.tdb tokudb_dump)
endif()
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}")
declare_custom_tests(${recover_would_be_tdb_tests})
declare_custom_tests(powerfail.tdb)
add_test(ydb/powerfail.tdb echo must run powerfail by hand)
declare_custom_tests(checkpoint_stress.tdb)
configure_file(run_checkpoint_stress_test.sh . COPYONLY)
add_test(NAME ydb/checkpoint_stress.tdb
COMMAND run_checkpoint_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb)
configure_file(run_recover_stress_test.sh . COPYONLY)
add_test(NAME ydb/recover_stress.tdb
COMMAND run_recover_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb)
declare_custom_tests(diskfull.tdb)
configure_file(run_diskfull_test.sh . COPYONLY)
add_test(NAME ydb/diskfull.tdb
COMMAND run_diskfull_test.sh $<TARGET_FILE:diskfull.tdb> 134)
setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb)
declare_custom_tests(recovery_fileops_unit.tdb)
configure_file(run_recovery_fileops_unit.sh . COPYONLY)
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir")
foreach(ov c d r)
if (ov STREQUAL c)
set(gset 0)
set(hset 0)
else ()
set(gset 0 1 2 3 4 5)
set(hset 0 1)
endif ()
foreach(av 0 1)
foreach(bv 0 1)
if (bv)
set(dset 0 1)
set(eset 0 1)
else ()
set(dset 0)
set(eset 0)
endif ()
foreach(cv 0 1 2)
foreach(dv ${dset})
foreach(ev ${eset})
foreach(fv 0 1)
foreach(gv ${gset})
foreach(hv ${hset})
if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv)))
set(iset 0 1)
else ()
set(iset 0)
endif ()
foreach(iv ${iset})
set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors")
add_test(NAME ${testname}
COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137
-O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv}
)
setup_toku_test_properties(${testname} ${envdir})
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}")
endforeach(iv)
endforeach(hv)
endforeach(gv)
endforeach(fv)
endforeach(ev)
endforeach(dv)
endforeach(cv)
endforeach(bv)
endforeach(av)
endforeach(ov)
if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR
(CMAKE_CXX_COMPILER_ID STREQUAL Intel AND
CMAKE_BUILD_TYPE STREQUAL Release)
OR USE_GCOV))
declare_custom_tests(helgrind1.tdb)
add_test(NAME ydb/helgrind_helgrind1.tdb
COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $<TARGET_FILE:helgrind1.tdb>)
setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb)
set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE)
endif()
declare_custom_tests(helgrind2.tdb)
declare_custom_tests(helgrind3.tdb)
add_ydb_helgrind_test(helgrind2.tdb)
add_ydb_helgrind_test(helgrind3.tdb)
declare_custom_tests(test_groupcommit_count.tdb)
add_ydb_test(test_groupcommit_count.tdb -n 1)
add_ydb_helgrind_test(test_groupcommit_count.tdb -n 1)
add_ydb_drd_test(test_groupcommit_count.tdb -n 1)
add_ydb_drd_test(test_4015.tdb)
# We link the locktree so that stress test 0 can call some
# functions (ie: lock escalation) directly.
target_link_libraries(test_stress0.tdb locktree)
# Set up default stress tests and drd tests. Exclude hot_index.
foreach(src ${stress_test_srcs})
if(NOT ${src} MATCHES hot_index)
get_filename_component(base ${src} NAME_WE)
set(test ${base}.tdb)
if (${src} MATCHES test_stress0)
add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600)
else ()
add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600)
endif ()
add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000)
set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600)
add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000)
setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test})
add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400)
set_tests_properties(ydb/drd_mid_${test} PROPERTIES
DEPENDS ydb/drd_mid_${test}/prepare
REQUIRED_FILES "drd_mid_${test}.ctest-data"
TIMEOUT 15000
)
add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000)
setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test})
add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800)
set_tests_properties(ydb/drd_large_${test} PROPERTIES
DEPENDS ydb/drd_large_${test}/prepare
REQUIRED_FILES "drd_large_${test}.ctest-data"
TIMEOUT 30000
)
endif()
endforeach(src)
# Set up upgrade tests. Exclude test_stress_openclose
foreach(src ${stress_test_srcs})
if (NOT ${src} MATCHES test_stress_openclose)
get_filename_component(base ${src} NAME_WE)
set(test ${base}.tdb)
foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3)
set(versiondir ${TOKU_SVNROOT}/tokudb.data/old-stress-test-envs/${oldver})
if (NOT EXISTS "${versiondir}/")
message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.")
endif ()
foreach(p_or_s pristine stressed)
if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed))
foreach(size 2000)
set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir")
set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}")
set(envdir "${envdirbase}.ctest-data")
set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size})
add_test(NAME ${testnamebase}/remove
COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}")
add_test(NAME ${testnamebase}/copy
COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}")
set_tests_properties(${testnamebase}/copy PROPERTIES
DEPENDS ${testnamebase}/remove
REQUIRED_FILES "${oldenvdir}")
add_test(NAME ${testnamebase}
COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200)
setup_toku_test_properties(${testnamebase} "${envdirbase}")
set_tests_properties(${testnamebase} PROPERTIES
DEPENDS ${testnamebase}/copy
REQUIRED_FILES "${envdir}"
TIMEOUT 10800)
endforeach(size)
endif ()
endforeach(p_or_s)
endforeach(oldver)
endif ()
endforeach(src)
if (NOT EXISTS "${TOKU_SVNROOT}/tokudb.data/test_5902/")
message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKU_SVNROOT}/tokudb.data/test_5902 first or dump-env.tdb may fail.")
endif ()
declare_custom_tests(dump-env.tdb)
add_test(NAME ydb/dump-env.tdb/remove
COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data")
add_test(NAME ydb/dump-env.tdb/copy
COMMAND ${CMAKE_COMMAND} -E copy_directory "${TOKU_SVNROOT}/tokudb.data/test_5902" "dump-env.tdb.ctest-data")
set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES
DEPENDS ydb/dump-env.tdb/remove
REQUIRED_FILES "${TOKU_SVNROOT}/tokudb.data/test_5902")
add_ydb_test(dump-env.tdb)
set_tests_properties(ydb/dump-env.tdb PROPERTIES
DEPENDS ydb/dump-env.tdb/copy
REQUIRED_FILES "dump-env.tdb.ctest-data")
## for some reason this rule doesn't run with the makefile and it crashes with this rule, so I'm disabling this special case
#declare_custom_tests(test_thread_stack.tdb)
#add_custom_command(OUTPUT run_test_thread_stack.sh
# COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}"
# MAIN_DEPENDENCY run_test_thread_stack.sh
# VERBATIM)
#add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh)
#add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb")
declare_custom_tests(root_fifo_41.tdb)
foreach(num RANGE 1 100)
add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate)
add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num})
endforeach(num)
add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000)
declare_custom_tests(test_abort4.tdb)
foreach(num RANGE -1 19)
add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num})
add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num})
endforeach(num)
set(old_loader_upgrade_data "${TOKU_SVNROOT}/tokudb.data/env_preload.4.2.0.emptydictionaries.cleanshutdown")
if (NOT EXISTS "${old_loader_upgrade_data}/")
message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.")
endif ()
# Register a loader upgrade test: wipe the test env dir, copy in the saved
# old-version environment, then run `bin -u ARGN` against it.  The run step
# depends on the copy step via ctest DEPENDS/REQUIRED_FILES.
function(add_loader_upgrade_test name bin)
  add_test(NAME ydb/${name}/remove
    COMMAND ${CMAKE_COMMAND} -E remove_directory "${name}.ctest-data")
  add_test(NAME ydb/${name}/copy
    COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${name}.ctest-data")
  set_tests_properties(ydb/${name}/copy PROPERTIES
    DEPENDS ydb/${name}/remove
    REQUIRED_FILES "${old_loader_upgrade_data}")
  add_ydb_test_aux(${name} ${bin} -u ${ARGN})
  set_tests_properties(ydb/${name} PROPERTIES
    DEPENDS ydb/${name}/copy
    REQUIRED_FILES "${name}.ctest-data")
endfunction(add_loader_upgrade_test)
list(REMOVE_ITEM loader_tests loader-stress-test.loader)
add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p)
add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s)
add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c)
add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z)
add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z)
add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z)
add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z)
add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28)
list(REMOVE_ITEM loader_tests loader-dup-test.loader)
add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb)
add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000)
add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000)
add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100)
add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000)
add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E)
add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z)
add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z)
add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z)
add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z)
add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z)
add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z)
## as part of #4503, we took out test 1 and 3
list(REMOVE_ITEM loader_tests loader-cleanup-test.loader)
add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800)
#add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p)
add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000)
#add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p)
add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z)
add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z)
declare_custom_tests(keyrange.tdb)
add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0)
add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1)
if (0)
add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1)
add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1)
else ()
message(WARNING "TODO(leif): re-enable keyrange tests, see #5666")
endif ()
add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1)
add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1)
declare_custom_tests(maxsize-for-loader.tdb)
add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c)
add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c)
add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c)
add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c)
declare_custom_tests(hotindexer-undo-do-test.tdb)
file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test")
file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result")
configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY)
foreach(result ${hotindexer_results})
configure_file(${result} ${result} COPYONLY)
endforeach(result)
foreach(test ${hotindexer_tests})
configure_file(${test} ${test} COPYONLY)
add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test})
setup_toku_test_properties(ydb/${test} ${test})
endforeach()
# Register every remaining (non-custom) tdb/bdb test the ordinary way.
foreach(test ${tdb_tests} ${bdb_tests})
  add_ydb_test(${test})
endforeach(test)
# Recovery tests run through a wrapper script that drives crash + recover +
# dump/compare using tdb-recover and tokudb_dump.
configure_file(run_recover_test.sh . COPYONLY)
foreach(recover_test ${recover_tests})
  get_filename_component(base ${recover_test} NAME_WE)
  add_test(NAME ydb/${recover_test}
    COMMAND run_recover_test.sh $<TARGET_FILE:${base}.tdb> "${recover_test}.ctest-data" $<TARGET_FILE:tdb-recover> $<TARGET_FILE:tokudb_dump>)
  setup_toku_test_properties(ydb/${recover_test} ${recover_test})
endforeach(recover_test)
# Abort-recovery tests use their own wrapper, which expects the abort phase
# to crash before running recovery.
configure_file(run_abortrecover_test.sh . COPYONLY)
foreach(abortrecover_test ${abortrecover_tests})
  get_filename_component(base ${abortrecover_test} NAME_WE)
  add_test(NAME ydb/${abortrecover_test}
    COMMAND run_abortrecover_test.sh $<TARGET_FILE:${base}.tdb>)
  setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test})
endforeach(abortrecover_test)
## alternate implementation, doesn't work because the abort phase crashes and we can't tell cmake that's expected
# foreach(abortrecover_test ${abortrecover_tests})
# get_filename_component(base ${abortrecover_test} NAME_WE)
# set(test ${base}.tdb)
# add_test(NAME ydb/${test}/abort
# COMMAND ${test} --test)
# setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test})
# set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE)
# add_test(NAME ydb/${test}/recover
# COMMAND ${test} --recover)
# setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test})
# set_tests_properties(ydb/${test}/recover PROPERTIES
# DEPENDS ydb/${test}/abort
# REQUIRED_FILES "${abortrecover_test}.ctest-data")
# endforeach(abortrecover_test)
# Each loader test runs three ways: no flags, -p, and -z (compressed).
# If the base test is in the should-fail list, replace it there with the
# three expanded variants so all of them get WILL_FAIL.
foreach(loader_test ${loader_tests})
  get_filename_component(base ${loader_test} NAME_WE)
  add_ydb_test_aux(${base}.nop.loader ${base}.tdb)
  add_ydb_test_aux(${base}.p.loader ${base}.tdb -p)
  add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z)
  if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader")
    list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader)
    list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader)
  endif()
endforeach(loader_test)
# Prefix every entry with "ydb/" (ctest names are namespaced) and mark them
# all as expected failures.
set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}")
string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}")
set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE)
## give some tests, that time out normally, 1 hour to complete
set(long_tests
ydb/checkpoint_1.tdb
ydb/drd_test_groupcommit_count.tdb
ydb/env-put-multiple.tdb
ydb/filesize.tdb
ydb/loader-cleanup-test0.tdb
ydb/loader-cleanup-test0z.tdb
ydb/manyfiles.tdb
ydb/recover-loader-test.abortrecover
ydb/recovery_fileops_stress.tdb
ydb/root_fifo_1.tdb
ydb/root_fifo_2.tdb
ydb/root_fifo_31.tdb
ydb/root_fifo_32.tdb
ydb/shutdown-3344.tdb
ydb/stat64-create-modify-times.tdb
ydb/test1572.tdb
ydb/test_abort4_19_0.tdb
ydb/test_abort4_19_1.tdb
ydb/test_abort5.tdb
ydb/test_archive1.tdb
ydb/test_logmax.tdb
ydb/test_query.tdb
ydb/test_txn_abort5.tdb
ydb/test_txn_abort5a.tdb
ydb/test_txn_abort6.tdb
ydb/test_txn_nested2.tdb
ydb/test_txn_nested4.tdb
ydb/test_txn_nested5.tdb
ydb/test_update_broadcast_stress.tdb
)
if (BDB_FOUND)
list(APPEND long_tests
ydb/root_fifo_1.bdb
ydb/root_fifo_31.bdb
ydb/rowsize.bdb
ydb/test_log10.bdb
ydb/test_log7.bdb
ydb/test_logmax.bdb
)
endif (BDB_FOUND)
set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
## some take even longer, with valgrind
set(extra_long_tests
ydb/drd_test_4015.tdb
ydb/hotindexer-with-queries.tdb
ydb/hot-optimize-table-tests.tdb
ydb/loader-cleanup-test2.tdb
ydb/loader-cleanup-test2z.tdb
ydb/loader-dup-test0.tdb
ydb/loader-stress-del.nop.loader
ydb/loader-stress-del.p.loader
ydb/loader-stress-del.comp.loader
ydb/test3039.tdb
ydb/test_update_stress.tdb
)
if (BDB_FOUND)
list(APPEND extra_long_tests
ydb/test_groupcommit_count.bdb
)
endif (BDB_FOUND)
set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
## these really take a long time with valgrind
set(phenomenally_long_tests
ydb/checkpoint_stress.tdb
ydb/loader-stress-test4.tdb
ydb/loader-stress-test4z.tdb
ydb/recover_stress.tdb
ydb/test3529.tdb
)
if (BDB_FOUND)
list(APPEND phenomenally_long_tests
ydb/test1426.tdb
)
endif (BDB_FOUND)
set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
endif(BUILD_TESTING OR BUILD_SRC_TESTS)
......@@ -24,7 +24,7 @@
static int create_child_txn(DB_TXN* txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
DB_TXN* child_txn = NULL;
DB_ENV* env = arg->env;
int r = env->txn_begin(env, txn, &child_txn, arg->txn_flags);
int r = env->txn_begin(env, txn, &child_txn, arg->txn_type);
CKERR(r);
r = child_txn->commit(child_txn, 0);
CKERR(r);
......
......@@ -376,7 +376,6 @@ stress_table(DB_ENV* env, DB **dbs, struct cli_args *cli_args) {
} else {
myargs[i].operation = iibench_rangequery_op;
myargs[i].operation_extra = &put_extra;
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
myargs[i].sleep_ms = 1000; // 1 second between range queries
}
}
......
......@@ -54,7 +54,6 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], dbp, env, cli_args);
myargs[i].operation = ptquery_op;
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
}
run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
}
......
......@@ -67,7 +67,6 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
thread_ids[i] = i % cli_args->num_DBs;
myargs[i].operation = ptquery_op2;
myargs[i].operation_extra = &thread_ids[i];
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
}
run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
}
......
......@@ -23,7 +23,6 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], dbp, env, cli_args);
myargs[i].operation = rangequery_op;
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
}
run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
}
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: perf_nop.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
// The intent of this test is to measure the throughput of creating and destroying
// root read-only transactions that create snapshots
// Do-nothing operation: the workers only measure the cost of beginning and
// committing the surrounding read-only snapshot transactions.
static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
    return 0;
}
// Launch the query threads; each one repeatedly runs the no-op operation
// inside a read-only transaction.
static void
stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
    if (verbose) {
        printf("starting creation of pthreads\n");
    }
    const int nthreads = cli_args->num_ptquery_threads;
    struct arg workers[nthreads];
    for (int t = 0; t < nthreads; t++) {
        struct arg *w = &workers[t];
        arg_init(w, dbp, env, cli_args);
        w->operation = nop;
        w->txn_flags |= DB_TXN_READ_ONLY;
    }
    run_workers(workers, nthreads, cli_args->num_seconds, false, cli_args);
}
// Entry point: configure a pure-transaction perf run (no DBs, no elements,
// no writers) and hand off to the stress harness.
int
test_main(int argc, char *const argv[]) {
    struct cli_args args = get_default_args_for_perf();
    parse_stress_test_args(argc, argv, &args);
    // No data and no writer threads: the workload is transactions only.
    args.num_DBs = 0;
    args.num_elements = 0;
    args.num_put_threads = 0;
    args.num_update_threads = 0;
    args.single_txn = false;
    stress_test_main(&args);
    return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: perf_txn_single_thread.cc 51911 2013-01-10 18:21:29Z zardosht $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
// The intent of this test is to measure how fast a single thread can
// commit and create transactions when there exist N transactions.
DB_TXN** txns;
int num_txns;
// Pick a random slot from the global txns[] array, commit the transaction
// living there, and immediately begin a fresh read-only transaction in the
// same slot.  Measures begin/commit throughput with N live transactions.
static int commit_and_create_txn(
    DB_TXN* UU(txn),
    ARG arg,
    void* UU(operation_extra),
    void* UU(stats_extra)
    )
{
    const int slot = random() % num_txns;
    DB_TXN **victim = &txns[slot];
    int r = (*victim)->commit(*victim, 0);
    CKERR(r);
    DB_ENV *const env = arg->env;
    r = env->txn_begin(env, 0, victim, arg->txn_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    return 0;
}
// Open cli_args->txn_size snapshot transactions, run a single worker that
// churns them via commit_and_create_txn, then commit whatever is left and
// release the array.
static void
stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
    if (verbose) {
        printf("starting running of stress\n");
    }
    num_txns = cli_args->txn_size;
    XCALLOC_N(num_txns, txns);
    for (int idx = 0; idx < num_txns; idx++) {
        int r = env->txn_begin(env, 0, &txns[idx], DB_TXN_SNAPSHOT);
        CKERR(r);
    }
    struct arg worker;
    arg_init(&worker, dbp, env, cli_args);
    worker.operation = commit_and_create_txn;
    run_workers(&worker, 1, cli_args->num_seconds, false, cli_args);
    // Commit every transaction still open after the workers stop.
    for (int idx = 0; idx < num_txns; idx++) {
        int chk_r = txns[idx]->commit(txns[idx], 0);
        CKERR(chk_r);
    }
    toku_free(txns);
    num_txns = 0;
}
// Entry point: reset the global transaction table, then run a single-txn
// perf test over a deliberately tiny database.
int
test_main(int argc, char *const argv[]) {
    txns = NULL;
    num_txns = 0;
    struct cli_args args = get_default_args_for_perf();
    parse_stress_test_args(argc, argv, &args);
    args.single_txn = true;
    // this test is all about transactions, make the DB small
    args.num_elements = 1;
    args.num_DBs = 1;
    perf_test_main(&args);
    return 0;
}
......@@ -31,7 +31,7 @@ static int commit_and_create_txn(
int rand_txn_id = random() % num_txns;
int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0);
CKERR(r);
r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_flags);
r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_type);
CKERR(r);
return 0;
}
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
// Write two rows inside one transaction, then verify a read-only snapshot
// transaction can iterate them with a cursor and sees both committed values.
int test_main(int argc, char * const argv[])
{
    int r;
    DB * db;
    DB_ENV * env;
    (void) argc;
    (void) argv;
    toku_os_recursive_delete(TOKU_TEST_FILENAME);
    r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
    // set things up
    r = db_env_create(&env, 0);
    CKERR(r);
    r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
    CKERR(r);
    r = db_create(&db, env, 0);
    CKERR(r);
    r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
    CKERR(r);
    // Populate: (1 -> 10) and (2 -> 20) under a snapshot transaction.
    DB_TXN* txn = NULL;
    r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT);
    CKERR(r);
    int k = 1;
    int v = 10;
    DBT key, val;
    r = db->put(
        db,
        txn,
        dbt_init(&key, &k, sizeof k),
        dbt_init(&val, &v, sizeof v),
        0
        );
    CKERR(r);
    k = 2;
    v = 20;
    r = db->put(
        db,
        txn,
        dbt_init(&key, &k, sizeof k),
        dbt_init(&val, &v, sizeof v),
        0
        );
    CKERR(r);
    r = txn->commit(txn, 0);
    CKERR(r);
    // Read back with a read-only snapshot transaction and a cursor.
    r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY);
    CKERR(r);
    DBC* cursor = NULL;
    r = db->cursor(db, txn, &cursor, 0);
    CKERR(r);
    DBT key1, val1;
    memset(&key1, 0, sizeof key1);
    memset(&val1, 0, sizeof val1);
    r = cursor->c_get(cursor, &key1, &val1, DB_FIRST);
    CKERR(r);
    invariant(key1.size == sizeof(int));
    invariant(*(int *)key1.data == 1);
    invariant(val1.size == sizeof(int));
    invariant(*(int *)val1.data == 10);
    r = cursor->c_get(cursor, &key1, &val1, DB_NEXT);
    CKERR(r);
    invariant(key1.size == sizeof(int));
    invariant(*(int *)key1.data == 2);
    invariant(val1.size == sizeof(int));
    invariant(*(int *)val1.data == 20);
    r = cursor->c_close(cursor);
    CKERR(r);
    r = txn->commit(txn, 0);
    CKERR(r);
    // clean things up
    r = db->close(db, 0);
    CKERR(r);
    r = env->close(env, 0);
    CKERR(r);
    return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
// Update callback stub.  This test only issues operations that must be
// rejected before any callback fires, so being invoked at all is a bug:
// abort immediately.  (The original had `assert(set_val != NULL)` *after*
// abort(), which was unreachable dead code; removed.)
static int update_fun(DB *UU(db),
                      const DBT *UU(key),
                      const DBT *UU(old_val), const DBT *UU(extra),
                      void (*set_val)(const DBT *new_val,
                                      void *set_extra),
                      void *UU(set_extra))
{
    (void) set_val;  // never used; this function must never run
    abort();
    return 0;        // unreachable, keeps the signature's contract
}
// Row-generation callback stub for puts.  The test only issues operations
// that must be rejected up front, so this must never be invoked; abort if
// it is.
static int generate_row_for_put(
    DB *UU(dest_db),
    DB *UU(src_db),
    DBT *UU(dest_key),
    DBT *UU(dest_val),
    const DBT *UU(src_key),
    const DBT *UU(src_val)
    )
{
    abort();
    return 0;
}
// Row-generation callback stub for deletes.  Must never be invoked by this
// test; abort if it is.
static int generate_row_for_del(
    DB *UU(dest_db),
    DB *UU(src_db),
    DBT *UU(dest_key),
    const DBT *UU(src_key),
    const DBT *UU(src_val)
    )
{
    abort();
    return 0;
}
// Verify that every write-type operation performed under a read-only
// transaction (DB_TXN_READ_ONLY combined with the given isolation flags)
// is rejected with EINVAL: db open/create, indexer and loader creation,
// descriptor change, put/del/update/broadcast, the *_multiple variants,
// and dbremove/dbrename.  The env is created and torn down per call.
static void test_invalid_ops(uint32_t iso_flags) {
    int r;
    DB * db;
    DB_ENV * env;
    toku_os_recursive_delete(TOKU_TEST_FILENAME);
    r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
    // set things up
    r = db_env_create(&env, 0);
    CKERR(r);
    r = env->set_generate_row_callback_for_put(env,generate_row_for_put);
    CKERR(r);
    r = env->set_generate_row_callback_for_del(env,generate_row_for_del);
    CKERR(r);
    env->set_update(env, update_fun);
    r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
    CKERR(r);
    r = db_create(&db, env, 0);
    CKERR(r);
    DB_TXN* txn = NULL;
    r = env->txn_begin(env, 0, &txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    // Creating a dictionary is a write: must fail under the read-only txn,
    // and succeed with no transaction.
    r = db->open(db, txn, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
    CKERR2(r, EINVAL);
    r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
    CKERR(r);
    int k = 1;
    int v = 10;
    DBT key, val;
    dbt_init(&key, &k, sizeof k);
    dbt_init(&val, &v, sizeof v);
    uint32_t db_flags = 0;
    uint32_t indexer_flags = 0;
    DB_INDEXER* indexer;
    r = env->create_indexer(
        env,
        txn,
        &indexer,
        db,
        1,
        &db,
        &db_flags,
        indexer_flags
        );
    CKERR2(r, EINVAL);
    // test invalid operations of ydb_db.cc,
    // db->open tested above
    DB_LOADER* loader;
    uint32_t put_flags = 0;
    uint32_t dbt_flags = 0;
    r = env->create_loader(env, txn, &loader, NULL, 1, &db, &put_flags, &dbt_flags, 0);
    CKERR2(r, EINVAL);
    r = db->change_descriptor(db, txn, &key, 0);
    CKERR2(r, EINVAL);
    //
    // test invalid operations return EINVAL from ydb_write.cc
    //
    r = db->put(db, txn, &key, &val,0);
    CKERR2(r, EINVAL);
    r = db->del(db, txn, &key, DB_DELETE_ANY);
    CKERR2(r, EINVAL);
    r = db->update(db, txn, &key, &val, 0);
    CKERR2(r, EINVAL);
    r = db->update_broadcast(db, txn, &val, 0);
    CKERR2(r, EINVAL);
    r = env->put_multiple(env, NULL, txn, &key, &val, 1, &db, &key, &val, 0);
    CKERR2(r, EINVAL);
    r = env->del_multiple(env, NULL, txn, &key, &val, 1, &db, &key, 0);
    CKERR2(r, EINVAL);
    // Initialized to 0: the original left this indeterminate, so
    // update_multiple could read an uninitialized flags value.
    uint32_t flags = 0;
    r = env->update_multiple(
        env, NULL, txn,
        &key, &val,
        &key, &val,
        1, &db, &flags,
        1, &key,
        1, &val
        );
    CKERR2(r, EINVAL);
    r = db->close(db, 0);
    CKERR(r);
    // test invalid operations of ydb.cc, dbrename and dbremove
    r = env->dbremove(env, txn, "foo.db", NULL, 0);
    CKERR2(r, EINVAL);
    // test invalid operations of ydb.cc, dbrename and dbremove
    r = env->dbrename(env, txn, "foo.db", NULL, "bar.db", 0);
    CKERR2(r, EINVAL);
    r = txn->commit(txn, 0);
    CKERR(r);
    // clean things up
    r = env->close(env, 0);
    CKERR(r);
}
// Entry point: run the invalid-operation checks at every isolation level.
int test_main(int argc, char * const argv[]) {
    (void) argc;
    (void) argv;
    const uint32_t iso_levels[] = {
        0,
        DB_TXN_SNAPSHOT,
        DB_READ_COMMITTED,
        DB_READ_UNCOMMITTED,
    };
    for (size_t i = 0; i < sizeof iso_levels / sizeof iso_levels[0]; i++) {
        test_invalid_ops(iso_levels[i]);
    }
    return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
// Check parent/child combinations of DB_TXN_READ_ONLY at the given
// isolation level: a read-only child under a writable parent must be
// rejected with EINVAL, while a read-only parent may have both read-only
// and plain children.
static void test_read_txn_creation(DB_ENV* env, uint32_t iso_flags) {
    int r;
    DB_TXN* parent_txn = NULL;
    DB_TXN* child_txn = NULL;
    // Writable parent: a READ_ONLY child is invalid, a plain child is fine.
    r = env->txn_begin(env, 0, &parent_txn, iso_flags);
    CKERR(r);
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR2(r, EINVAL);
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags);
    CKERR(r);
    r = child_txn->commit(child_txn, 0);
    CKERR(r);
    r = parent_txn->commit(parent_txn, 0);
    CKERR(r);
    // Read-only parent: both READ_ONLY and plain children are accepted.
    r = env->txn_begin(env, 0, &parent_txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    r = child_txn->commit(child_txn, 0);
    CKERR(r);
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags);
    CKERR(r);
    r = child_txn->commit(child_txn, 0);
    CKERR(r);
    r = parent_txn->commit(parent_txn, 0);
    CKERR(r);
}
// Entry point: build a fresh environment and exercise read-only txn
// creation rules at every isolation level.
int test_main(int argc, char * const argv[])
{
    int r;
    DB_ENV * env;
    (void) argc;
    (void) argv;
    toku_os_recursive_delete(TOKU_TEST_FILENAME);
    r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
    // set things up
    r = db_env_create(&env, 0);
    CKERR(r);
    r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
    CKERR(r);
    const uint32_t iso_levels[] = {
        0,
        DB_TXN_SNAPSHOT,
        DB_READ_COMMITTED,
        DB_READ_UNCOMMITTED,
    };
    for (size_t i = 0; i < sizeof iso_levels / sizeof iso_levels[0]; i++) {
        test_read_txn_creation(env, iso_levels[i]);
    }
    r = env->close(env, 0);
    CKERR(r);
    return 0;
}
......@@ -69,7 +69,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[1].prefetch = false;
myargs[1].operation_extra = &soe[1];
myargs[1].operation = scan_op;
myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward fast scanner
soe[2].fast = true;
......@@ -77,7 +76,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[2].prefetch = false;
myargs[2].operation_extra = &soe[2];
myargs[2].operation = scan_op;
myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward slow scanner
soe[3].fast = false;
......
......@@ -63,7 +63,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[1].prefetch = false;
myargs[1].operation_extra = &soe[1];
myargs[1].operation = scan_op;
myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward fast scanner
soe[2].fast = true;
......@@ -71,7 +70,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[2].prefetch = false;
myargs[2].operation_extra = &soe[2];
myargs[2].operation = scan_op;
myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward slow scanner
soe[3].fast = false;
......
......@@ -62,7 +62,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[1].prefetch = false;
myargs[1].operation_extra = &soe[1];
myargs[1].operation = scan_op;
myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward fast scanner
soe[2].fast = true;
......@@ -70,7 +69,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[2].prefetch = false;
myargs[2].operation_extra = &soe[2];
myargs[2].operation = scan_op;
myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward slow scanner
soe[3].fast = false;
......
......@@ -36,7 +36,6 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[0].prefetch = false;
myargs[0].operation_extra = &soe[0];
myargs[0].operation = scan_op;
myargs[0].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the forward slow scanner
soe[1].fast = false;
......
......@@ -135,7 +135,7 @@ struct arg {
// DB are in [0, num_elements)
// false otherwise
int sleep_ms; // number of milliseconds to sleep between operations
uint32_t txn_flags; // isolation level for txn running operation
uint32_t txn_type; // isolation level for txn running operation
operation_t operation; // function that is the operation to be run
void* operation_extra; // extra parameter passed to operation
enum stress_lock_type lock_type; // states if operation must be exclusive, shared, or does not require locking
......@@ -154,7 +154,7 @@ static void arg_init(struct arg *arg, DB **dbp, DB_ENV *env, struct cli_args *cl
arg->bounded_element_range = true;
arg->sleep_ms = 0;
arg->lock_type = STRESS_LOCK_NONE;
arg->txn_flags = DB_TXN_SNAPSHOT;
arg->txn_type = DB_TXN_SNAPSHOT;
arg->operation_extra = nullptr;
arg->do_prepare = false;
arg->prelock_updates = false;
......@@ -486,12 +486,12 @@ static void *worker(void *arg_v) {
printf("%lu starting %p\n", (unsigned long) intself, arg->operation);
}
if (arg->cli->single_txn) {
r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r);
r = env->txn_begin(env, 0, &txn, arg->txn_type); CKERR(r);
}
while (run_test) {
lock_worker_op(we);
if (!arg->cli->single_txn) {
r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r);
r = env->txn_begin(env, 0, &txn, arg->txn_type); CKERR(r);
}
r = arg->operation(txn, arg, arg->operation_extra, we->counters);
if (r==0 && !arg->cli->single_txn && arg->do_prepare) {
......@@ -2649,7 +2649,7 @@ UU() stress_recover(struct cli_args *args) {
DB_TXN* txn = nullptr;
struct arg recover_args;
arg_init(&recover_args, dbs, env, args);
int r = env->txn_begin(env, 0, &txn, recover_args.txn_flags);
int r = env->txn_begin(env, 0, &txn, recover_args.txn_type);
CKERR(r);
struct scan_op_extra soe = {
.fast = true,
......
......@@ -206,16 +206,6 @@ env_opened(DB_ENV *env) {
return env->i->cachetable != 0;
}
// Return true iff this transaction was opened with DB_TXN_READ_ONLY.
// A NULL txn counts as read-write.
static inline bool
txn_is_read_only(DB_TXN* txn) {
    return txn != NULL && (db_txn_struct_i(txn)->flags & DB_TXN_READ_ONLY) != 0;
}
#define HANDLE_READ_ONLY_TXN(txn) if(txn_is_read_only(txn)) return EINVAL;
void env_panic(DB_ENV * env, int cause, const char * msg);
void env_note_db_opened(DB_ENV *env, DB *db);
void env_note_db_closed(DB_ENV *env, DB *db);
......
......@@ -1159,7 +1159,6 @@ static int
locked_env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags) {
int ret, r;
HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
HANDLE_READ_ONLY_TXN(txn);
DB_TXN *child_txn = NULL;
int using_txns = env->i->open_flags & DB_INIT_TXN;
......@@ -1190,7 +1189,6 @@ static int env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char
static int
locked_env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) {
int ret, r;
HANDLE_READ_ONLY_TXN(txn);
HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
DB_TXN *child_txn = NULL;
......@@ -2363,7 +2361,6 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u
if (!env_opened(env) || flags != 0) {
return EINVAL;
}
HANDLE_READ_ONLY_TXN(txn);
if (dbname != NULL) {
// env_dbremove_subdb() converts (fname, dbname) to dname
return env_dbremove_subdb(env, txn, fname, dbname, flags);
......@@ -2470,7 +2467,6 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
if (!env_opened(env) || flags != 0) {
return EINVAL;
}
HANDLE_READ_ONLY_TXN(txn);
if (dbname != NULL) {
// env_dbrename_subdb() converts (fname, dbname) to dname and (fname, newname) to newdname
return env_dbrename_subdb(env, txn, fname, dbname, newname, flags);
......
......@@ -210,7 +210,6 @@ static uint64_t nontransactional_open_id = 0;
static int
toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
HANDLE_PANICKED_DB(db);
HANDLE_READ_ONLY_TXN(txn);
if (dbname != NULL) {
return db_open_subdb(db, txn, fname, dbname, dbtype, flags, mode);
}
......@@ -348,7 +347,6 @@ void toku_db_lt_on_destroy_callback(toku::locktree *lt) {
int
toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) {
//Set comparison functions if not yet set.
HANDLE_READ_ONLY_TXN(txn);
if (!db->i->key_compare_was_set && db->dbenv->i->bt_compare) {
toku_ft_set_bt_compare(db->i->ft_handle, db->dbenv->i->bt_compare);
db->i->key_compare_was_set = true;
......@@ -471,7 +469,6 @@ int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn) {
static int
toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t flags) {
HANDLE_PANICKED_DB(db);
HANDLE_READ_ONLY_TXN(txn);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
int r = 0;
TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
......@@ -698,7 +695,6 @@ autotxn_db_getf_set (DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK
static int
locked_db_open(DB *db, DB_TXN *txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
int ret, r;
HANDLE_READ_ONLY_TXN(txn);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
//
......@@ -1028,7 +1024,6 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new
int
locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) {
int ret, r;
HANDLE_READ_ONLY_TXN(txn);
DB_TXN *child_txn = NULL;
int using_txns = env->i->open_flags & DB_INIT_TXN;
......
......@@ -329,36 +329,6 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) {
uint32_t txn_flags = 0;
txn_flags |= DB_TXN_NOWAIT; //We do not support blocking locks. RFP remove this?
// handle whether txn is declared as read only
bool parent_txn_declared_read_only =
stxn &&
(db_txn_struct_i(stxn)->flags & DB_TXN_READ_ONLY);
bool txn_declared_read_only = false;
if (flags & DB_TXN_READ_ONLY) {
txn_declared_read_only = true;
txn_flags |= DB_TXN_READ_ONLY;
flags &= ~(DB_TXN_READ_ONLY);
}
if (txn_declared_read_only && stxn &&
!parent_txn_declared_read_only
)
{
return toku_ydb_do_error(
env,
EINVAL,
"Current transaction set as read only, but parent transaction is not\n"
);
}
if (parent_txn_declared_read_only)
{
// don't require child transaction to also set transaction as read only
// if parent has already done so
txn_flags |= DB_TXN_READ_ONLY;
txn_declared_read_only = true;
}
TOKU_ISOLATION child_isolation = TOKU_ISO_SERIALIZABLE;
uint32_t iso_flags = flags & DB_ISOLATION_FLAGS;
if (!(iso_flags == 0 ||
......@@ -464,8 +434,7 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) {
TXNID_PAIR_NONE,
snapshot_type,
result,
false, // for_recovery
txn_declared_read_only // read_only
false
);
if (r != 0) {
toku_free(result);
......
......@@ -132,7 +132,6 @@ int
toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) {
HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
uint32_t unchecked_flags = flags;
//DB_DELETE_ANY means delete regardless of whether it exists in the db.
......@@ -176,7 +175,6 @@ int
toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) {
HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
int r = 0;
uint32_t lock_flags = get_prelocked_flags(flags);
......@@ -224,7 +222,6 @@ toku_db_update(DB *db, DB_TXN *txn,
uint32_t flags) {
HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
int r = 0;
uint32_t lock_flags = get_prelocked_flags(flags);
......@@ -266,7 +263,6 @@ toku_db_update_broadcast(DB *db, DB_TXN *txn,
uint32_t flags) {
HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
int r = 0;
uint32_t lock_flags = get_prelocked_flags(flags);
......@@ -432,7 +428,6 @@ env_del_multiple(
DB_INDEXER* indexer = NULL;
HANDLE_PANICKED_ENV(env);
HANDLE_READ_ONLY_TXN(txn);
uint32_t lock_flags[num_dbs];
uint32_t remaining_flags[num_dbs];
......@@ -579,7 +574,6 @@ env_put_multiple_internal(
DB_INDEXER* indexer = NULL;
HANDLE_PANICKED_ENV(env);
HANDLE_READ_ONLY_TXN(txn);
uint32_t lock_flags[num_dbs];
uint32_t remaining_flags[num_dbs];
......@@ -680,7 +674,6 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn,
HANDLE_PANICKED_ENV(env);
DB_INDEXER* indexer = NULL;
HANDLE_READ_ONLY_TXN(txn);
if (!txn) {
r = EINVAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment