Commit a1190947 authored by Leif Walsh's avatar Leif Walsh

changed to ANNOTATE_IGNORE_{READS,WRITES}_{BEGIN,END}

DRD seems to prefer those annotations to
DRD_IGNORE_VAR/DRD_STOP_IGNORING_VAR.

Helgrind is stupid, and VALGRIND_HG_{DIS,EN}ABLE_CHECKING pairs
bizarrely "reclaim" the memory location for the current thread when
re-enabling checking.  So they probably aren't completely suitable for
our purposes, but Helgrind provides no facility for doing what we want
because it just wants to watch the world burn.  So we add a couple of
Helgrind suppressions in addition.

Also, added a cute RAII wrapper for reading a variable in a racy way.
Writing can be done the same way if we only want to appease DRD, but
because Helgrind does things on a memory location-basis and not a
thread-basis, things get a little tricky.
parent bd415e73
......@@ -330,7 +330,7 @@ void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) {
bool locktree::sto_try_acquire(void *prepared_lkr,
TXNID txnid,
const DBT *left_key, const DBT *right_key) {
if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && toku_drd_unsafe_fetch(&m_sto_score) >= STO_SCORE_THRESHOLD) {
if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && data_race::unsafe_read<int>(m_sto_score) >= STO_SCORE_THRESHOLD) {
// We can do the optimization because the rangetree is empty, and
// we know its worth trying because the sto score is big enough.
sto_begin(txnid);
......@@ -536,16 +536,16 @@ void locktree::remove_overlapping_locks_for_txnid(TXNID txnid,
}
bool locktree::sto_txnid_is_valid_unsafe(void) const {
    // Racy read of m_sto_txnid, useful only as a heuristic: callers must
    // re-check under a prepared locked keyrange before relying on it
    // (see sto_try_release).
    return data_race::unsafe_read<TXNID>(m_sto_txnid) != TXNID_NONE;
}
int locktree::sto_get_score_unsafe(void) const {
    // Racy read of the single-txnid-optimization score. The value is
    // approximate by design; it only gates whether the optimization is
    // worth attempting.
    return data_race::unsafe_read<int>(m_sto_score);
}
bool locktree::sto_try_release(TXNID txnid) {
bool released = false;
if (sto_txnid_is_valid_unsafe()) {
if (data_race::unsafe_read<TXNID>(m_sto_txnid) != TXNID_NONE) {
// check the bit again with a prepared locked keyrange,
// which protects the optimization bits and rangetree data
concurrent_tree::locked_keyrange lkr;
......@@ -585,7 +585,7 @@ void locktree::release_locks(TXNID txnid, const range_buffer *ranges) {
// the threshold and we'll try the optimization again. This
// is how a previously multithreaded system transitions into
// a single threaded system that benefits from the optimization.
if (sto_get_score_unsafe() < STO_SCORE_THRESHOLD) {
if (data_race::unsafe_read<int>(m_sto_score) < STO_SCORE_THRESHOLD) {
toku_sync_fetch_and_add(&m_sto_score, 1);
}
}
......
......@@ -103,6 +103,10 @@ PATENT RIGHTS GRANT:
# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) VALGRIND_HG_DISABLE_CHECKING(p, size)
# define TOKU_DRD_IGNORE_VAR(v) DRD_IGNORE_VAR(v)
# define TOKU_DRD_STOP_IGNORING_VAR(v) DRD_STOP_IGNORING_VAR(v)
# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ANNOTATE_IGNORE_READS_BEGIN()
# define TOKU_ANNOTATE_IGNORE_READS_END() ANNOTATE_IGNORE_READS_END()
# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ANNOTATE_IGNORE_WRITES_BEGIN()
# define TOKU_ANNOTATE_IGNORE_WRITES_END() ANNOTATE_IGNORE_WRITES_END()
/*
* How to make helgrind happy about tree rotations and new mutex orderings:
......@@ -134,21 +138,42 @@ PATENT RIGHTS GRANT:
# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) ((void) 0)
# define TOKU_DRD_IGNORE_VAR(v)
# define TOKU_DRD_STOP_IGNORING_VAR(v)
# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ((void) 0)
# define TOKU_ANNOTATE_IGNORE_READS_END() ((void) 0)
# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ((void) 0)
# define TOKU_ANNOTATE_IGNORE_WRITES_END() ((void) 0)
# define TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(mutex)
# define RUNNING_ON_VALGRIND (0U)
#endif
namespace data_race {

    // RAII wrapper for reading a shared variable in a deliberately racy
    // way without tripping valgrind's race detectors.  The constructor
    // disables Helgrind checking on the referenced location and opens an
    // ANNOTATE_IGNORE_READS region; the destructor closes the region and
    // re-enables checking.  Convert the wrapper to T to perform the
    // unchecked read, e.g.:
    //
    //     int score = data_race::unsafe_read<int>(m_sto_score);
    template<typename T>
    class unsafe_read {
        const T &_val;
        // Non-copyable: a copy's destructor would run the
        // ANNOTATE_IGNORE_READS_END / re-enable-checking sequence a second
        // time, unbalancing the annotation pairs.  Declared private and
        // left undefined (pre-C++11 noncopyable idiom).
        unsafe_read(const unsafe_read &);
        unsafe_read &operator=(const unsafe_read &);
    public:
        unsafe_read(const T &val)
            : _val(val) {
            TOKU_VALGRIND_HG_DISABLE_CHECKING(&_val, sizeof _val);
            TOKU_ANNOTATE_IGNORE_READS_BEGIN();
        }
        ~unsafe_read() {
            TOKU_ANNOTATE_IGNORE_READS_END();
            TOKU_VALGRIND_HG_ENABLE_CHECKING(&_val, sizeof _val);
        }
        // Perform the (unchecked) read.
        operator T() const {
            return _val;
        }
    };

} // namespace data_race
// Unsafely fetch and return a `T' from *src, telling the race detectors
// to ignore racy access to src for the sizeof(*src) bytes read.
template <typename T>
T toku_drd_unsafe_fetch(T *src) {
    // Delegate to the RAII wrapper, which brackets the read with
    // Helgrind disable/enable-checking and ANNOTATE_IGNORE_READS
    // annotations.
    return data_race::unsafe_read<T>(*src);
}
// Unsafely set a `T' value into *dest from src, telling drd to ignore
......@@ -156,8 +181,8 @@ T toku_drd_unsafe_fetch(T *src) {
// Unsafely store `src' into *dest, telling the race detectors to ignore
// racy access to dest for the sizeof(*dest) bytes written.
template <typename T>
void toku_drd_unsafe_set(T *dest, const T src) {
    TOKU_VALGRIND_HG_DISABLE_CHECKING(dest, sizeof *dest);
    TOKU_ANNOTATE_IGNORE_WRITES_BEGIN();
    *dest = src;
    TOKU_ANNOTATE_IGNORE_WRITES_END();
    TOKU_VALGRIND_HG_ENABLE_CHECKING(dest, sizeof *dest);
}
......@@ -123,4 +123,24 @@
fun:pthread_cond_destroy@*
...
fun:_ZN7evictor7destroyEv
}
\ No newline at end of file
}
{
<helgrind_doesnt_understand_the_way_the_world_works_and_ignores_our_disable_checking_instructions>
Helgrind:Race
fun:_ZN4toku8locktree15sto_try_acquireEPvmPK10__toku_dbtS4_
fun:_ZN4toku8locktree12acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setE
fun:_ZN4toku8locktree16try_acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setEb
fun:_ZN4toku8locktree18acquire_write_lockEmPK10__toku_dbtS3_PNS_9txnid_setEb
fun:_ZN4toku12lock_request5startEv
...
}
{
<helgrind_bug_323432_see_http://permalink.gmane.org/gmane.comp.debugging.valgrind/13325>
Helgrind:Race
obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
fun:pthread_mutex_destroy
fun:toku_mutex_destroy
fun:_ZN4toku8treenode4freeEPS0_
fun:_ZN4toku8treenode22remove_root_of_subtreeEv
...
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment