Commit 254d71b5 authored by Rich Prohaska

Merge branch 'master' into releases/tokudb-7.5

parents 02cd38d8 00078457
......@@ -2237,7 +2237,7 @@ static int ft_leaf_get_relative_key_pos(FT ft, FTNODE leaf, const DBT *key, bool
nullptr, nullptr, nullptr
);
*target_childnum = childnum;
-if (r == 0 && !le_latest_is_del(leftmost_le)) {
+if (r == 0 && !le_latest_is_del(target_le)) {
*nondeleted_key_found = true;
}
}
......
......@@ -330,7 +330,7 @@ void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) {
bool locktree::sto_try_acquire(void *prepared_lkr,
TXNID txnid,
const DBT *left_key, const DBT *right_key) {
-if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && toku_drd_unsafe_fetch(&m_sto_score) >= STO_SCORE_THRESHOLD) {
+if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && data_race::unsafe_read<int>(m_sto_score) >= STO_SCORE_THRESHOLD) {
// We can do the optimization because the rangetree is empty, and
// we know its worth trying because the sto score is big enough.
sto_begin(txnid);
......@@ -536,16 +536,16 @@ void locktree::remove_overlapping_locks_for_txnid(TXNID txnid,
}
bool locktree::sto_txnid_is_valid_unsafe(void) const {
-return toku_drd_unsafe_fetch(&m_sto_txnid) != TXNID_NONE;
+return data_race::unsafe_read<TXNID>(m_sto_txnid) != TXNID_NONE;
}
int locktree::sto_get_score_unsafe(void) const {
-return toku_drd_unsafe_fetch(&m_sto_score);
+return data_race::unsafe_read<int>(m_sto_score);
}
bool locktree::sto_try_release(TXNID txnid) {
bool released = false;
-if (sto_txnid_is_valid_unsafe()) {
+if (data_race::unsafe_read<TXNID>(m_sto_txnid) != TXNID_NONE) {
// check the bit again with a prepared locked keyrange,
// which protects the optimization bits and rangetree data
concurrent_tree::locked_keyrange lkr;
......@@ -585,7 +585,7 @@ void locktree::release_locks(TXNID txnid, const range_buffer *ranges) {
// the threshold and we'll try the optimization again. This
// is how a previously multithreaded system transitions into
// a single threaded system that benefits from the optimization.
-if (sto_get_score_unsafe() < STO_SCORE_THRESHOLD) {
+if (data_race::unsafe_read<int>(m_sto_score) < STO_SCORE_THRESHOLD) {
toku_sync_fetch_and_add(&m_sto_score, 1);
}
}
......
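Note on the locktree hunks above: the single txnid optimization (STO) score is read without synchronization on purpose, and only the increment in release_locks uses toku_sync_fetch_and_add; a stale score only affects whether the optimization is attempted, since the decision is re-checked under a prepared locked keyrange. A minimal standalone sketch of that pattern, not TokuFT source, with std::atomic standing in for the TokuFT primitives and an assumed threshold value:

    // Sketch only: illustrates the score pattern described in the comments
    // above. Names mirror the diff; std::atomic and the threshold value are
    // assumptions, not TokuFT code.
    #include <atomic>

    static const int STO_SCORE_THRESHOLD = 100;  // assumed value

    struct sto_score_sketch {
        std::atomic<int> score{0};

        // Deliberately racy read; a stale value only changes when the
        // optimization gets attempted.
        int read_unsafe() const {
            return score.load(std::memory_order_relaxed);
        }

        // After a lock release by a single txnid with the optimization
        // inactive, bump the score toward the threshold (atomic increment,
        // like toku_sync_fetch_and_add in the diff).
        void note_single_txnid_release() {
            if (read_unsafe() < STO_SCORE_THRESHOLD) {
                score.fetch_add(1, std::memory_order_relaxed);
            }
        }

        // Only try the optimization once the score says the locktree has
        // effectively been used by one transaction at a time.
        bool should_try_sto() const {
            return read_unsafe() >= STO_SCORE_THRESHOLD;
        }
    };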
......@@ -103,6 +103,10 @@ PATENT RIGHTS GRANT:
# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) VALGRIND_HG_DISABLE_CHECKING(p, size)
# define TOKU_DRD_IGNORE_VAR(v) DRD_IGNORE_VAR(v)
# define TOKU_DRD_STOP_IGNORING_VAR(v) DRD_STOP_IGNORING_VAR(v)
+# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ANNOTATE_IGNORE_READS_BEGIN()
+# define TOKU_ANNOTATE_IGNORE_READS_END() ANNOTATE_IGNORE_READS_END()
+# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ANNOTATE_IGNORE_WRITES_BEGIN()
+# define TOKU_ANNOTATE_IGNORE_WRITES_END() ANNOTATE_IGNORE_WRITES_END()
/*
* How to make helgrind happy about tree rotations and new mutex orderings:
......@@ -134,21 +138,42 @@ PATENT RIGHTS GRANT:
# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) ((void) 0)
# define TOKU_DRD_IGNORE_VAR(v)
# define TOKU_DRD_STOP_IGNORING_VAR(v)
+# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ((void) 0)
+# define TOKU_ANNOTATE_IGNORE_READS_END() ((void) 0)
+# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ((void) 0)
+# define TOKU_ANNOTATE_IGNORE_WRITES_END() ((void) 0)
# define TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(mutex)
# define RUNNING_ON_VALGRIND (0U)
#endif
+namespace data_race {
+template<typename T>
+class unsafe_read {
+const T &_val;
+public:
+unsafe_read(const T &val)
+: _val(val) {
+TOKU_VALGRIND_HG_DISABLE_CHECKING(&_val, sizeof _val);
+TOKU_ANNOTATE_IGNORE_READS_BEGIN();
+}
+~unsafe_read() {
+TOKU_ANNOTATE_IGNORE_READS_END();
+TOKU_VALGRIND_HG_ENABLE_CHECKING(&_val, sizeof _val);
+}
+operator T() const {
+return _val;
+}
+};
+} // namespace data_race
// Unsafely fetch and return a `T' from src, telling drd to ignore
// racey access to src for the next sizeof(*src) bytes
template <typename T>
T toku_drd_unsafe_fetch(T *src) {
-TOKU_VALGRIND_HG_DISABLE_CHECKING(src, sizeof *src);
-TOKU_DRD_IGNORE_VAR(*src);
-T val = *src;
-TOKU_DRD_STOP_IGNORING_VAR(*src);
-TOKU_VALGRIND_HG_ENABLE_CHECKING(src, sizeof *src);
-return val;
+return data_race::unsafe_read<T>(*src);
}
// Unsafely set a `T' value into *dest from src, telling drd to ignore
......@@ -156,8 +181,8 @@ T toku_drd_unsafe_fetch(T *src) {
template <typename T>
void toku_drd_unsafe_set(T *dest, const T src) {
TOKU_VALGRIND_HG_DISABLE_CHECKING(dest, sizeof *dest);
-TOKU_DRD_IGNORE_VAR(*dest);
+TOKU_ANNOTATE_IGNORE_WRITES_BEGIN();
*dest = src;
-TOKU_DRD_STOP_IGNORING_VAR(*dest);
+TOKU_ANNOTATE_IGNORE_WRITES_END();
TOKU_VALGRIND_HG_ENABLE_CHECKING(dest, sizeof *dest);
}
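For reference, a minimal usage sketch of the new wrapper (assumed caller code, not part of this commit): constructing a data_race::unsafe_read temporary brackets a single racy read with the ignore annotations, and toku_drd_unsafe_set does the same for a racy write. The struct and field names below are hypothetical; only the two helpers come from the header patched above.

    // Sketch only: hypothetical caller of the helpers added/changed above.
    // Assumes this header is included so data_race::unsafe_read and
    // toku_drd_unsafe_set are visible.
    struct status_flag {
        bool shutting_down = false;

        // Reader thread: a stale value is acceptable, so read through
        // data_race::unsafe_read instead of taking a lock. The temporary's
        // constructor/destructor wrap the read in ignore annotations.
        bool is_shutting_down() const {
            return data_race::unsafe_read<bool>(shutting_down);
        }

        // Writer thread: the matching unsynchronized store goes through
        // toku_drd_unsafe_set so the tools ignore the write as well.
        void request_shutdown() {
            toku_drd_unsafe_set(&shutting_down, true);
        }
    };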
......@@ -486,6 +486,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
ydb/loader-stress-test4z.tdb
ydb/recover_stress.tdb
ydb/test3529.tdb
+ydb/test_insert_unique.tdb
)
set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
endif(BUILD_TESTING OR BUILD_SRC_TESTS)
......@@ -123,4 +123,24 @@
fun:pthread_cond_destroy@*
...
fun:_ZN7evictor7destroyEv
-}
\ No newline at end of file
+}
+{
+<helgrind_doesnt_understand_the_way_the_world_works_and_ignores_our_disable_checking_instructions>
+Helgrind:Race
+fun:_ZN4toku8locktree15sto_try_acquireEPvmPK10__toku_dbtS4_
+fun:_ZN4toku8locktree12acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setE
+fun:_ZN4toku8locktree16try_acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setEb
+fun:_ZN4toku8locktree18acquire_write_lockEmPK10__toku_dbtS3_PNS_9txnid_setEb
+fun:_ZN4toku12lock_request5startEv
+...
+}
+{
+<helgrind_bug_323432_see_http://permalink.gmane.org/gmane.comp.debugging.valgrind/13325>
+Helgrind:Race
+obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
+fun:pthread_mutex_destroy
+fun:toku_mutex_destroy
+fun:_ZN4toku8treenode4freeEPS0_
+fun:_ZN4toku8treenode22remove_root_of_subtreeEv
+...
+}
......@@ -133,7 +133,7 @@ static void test_large_sequential_insert_unique(DB_ENV *env) {
r = db->set_readpagesize(db, 2 * 1024); CKERR(r);
r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r);
-const int val_size = 1024;
+const int val_size = 8;
char *XMALLOC_N(val_size, val_buf);
memset(val_buf, 'k', val_size);
DBT val;
......@@ -153,9 +153,18 @@ static void test_large_sequential_insert_unique(DB_ENV *env) {
// .. but re-inserting is okay, if we provisionally deleted the row
DB_TXN *txn;
r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
-r = db->del(db, NULL, &key, DB_DELETE_ANY); CKERR(r);
-r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r);
+r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR(r);
+r = db->put(db, txn, &key, &val, DB_NOOVERWRITE); CKERR(r);
r = txn->commit(txn, 0); CKERR(r);
+// re-inserting is also ok if we actually delete the row, for some key < k
+if (i > 0) {
+DBT other_key;
+int other_k = toku_htonl(i - 10);
+dbt_init(&other_key, &other_k, sizeof(other_k));
+r = db->del(db, NULL, &other_key, DB_DELETE_ANY); CKERR(r);
+r = db->put(db, NULL, &other_key, &val, DB_NOOVERWRITE); CKERR(r);
+}
}
if (i > 0 && i % 250 == 0) {
// sanity check - unique checks on random keys we already inserted should
......
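For reference, the contract the test above relies on: db->put with DB_NOOVERWRITE returns DB_KEYEXIST when the key is already present, and succeeds again once the row has been deleted. A minimal sketch under that assumption, reusing the harness helpers visible above (CKERR, dbt_init, toku_htonl) plus CKERR2, which is assumed to come from the same harness, and an env/db opened as in the test:

    // Sketch only: hypothetical helper, not part of the patched test.
    static void check_unique_put_sketch(DB_ENV *env, DB *db, int k) {
        DBT key, val;
        int key_buf = toku_htonl(k);
        char val_buf = 'v';
        dbt_init(&key, &key_buf, sizeof(key_buf));
        dbt_init(&val, &val_buf, sizeof(val_buf));

        // First unique insert succeeds.
        int r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r);

        // A second unique insert of the same key must see the existing row.
        r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST);

        // After a committed delete, the unique insert succeeds again.
        DB_TXN *txn;
        r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
        r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR(r);
        r = txn->commit(txn, 0); CKERR(r);
        r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r);
    }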