Commit 5f2dcd11 authored by Marko Mäkelä, committed by Daniel Black

MDEV-24167 fixup: srw_lock_debug instrumentation

While the index_lock and block_lock include debug instrumentation
to keep track of shared lock holders, such instrumentation was
never part of the simpler srw_lock, and therefore some users of the
class implemented a limited form of bookkeeping.

srw_lock_debug encapsulates srw_lock and adds the data members
writer, readers_lock, and readers to keep track of the threads that
hold the exclusive latch or any shared latches. The debug checks
are also available with SUX_LOCK_GENERIC (in environments that do not
implement a futex-like system call).

dict_sys_t::latch: Use srw_lock_debug in debug builds.
This makes the debug fields latch_ex, latch_readers redundant.

fil_space_t::latch: Use srw_lock_debug in debug builds.
This makes the debug field latch_count redundant.
The field latch_owner must be preserved, because
fil_space_t::is_owner() is used in all builds.

lock_sys_t::latch: Use srw_lock_debug in debug builds.
This makes the debug fields writer, readers redundant.

lock_sys_t::is_holder(): A new debug predicate to check if
the current thread is holding lock_sys.latch in any mode.

trx_rseg_t::latch: Use srw_lock_debug in debug builds.
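
Throughout the diff, the latch declarations are switched with the
IF_DBUG(A,B) selector, which yields its first argument in debug builds and
its second otherwise, so the instrumented wrapper is substituted only when
UNIV_DEBUG is defined. A minimal sketch of the semantics the diff relies on
(the actual macro definition lives in the MariaDB headers and may differ;
example_sys is a hypothetical name):

/* Sketch only: a debug/release type selector with IF_DBUG(A,B) semantics.
MariaDB's own definition may key off a different build macro. */
#ifdef UNIV_DEBUG
# define IF_DBUG(A,B) A
#else
# define IF_DBUG(A,B) B
#endif

struct example_sys
{
  /* srw_lock_debug in debug builds, plain srw_lock otherwise; because the
  wrapper mirrors the srw_lock interface, call sites stay identical. */
  IF_DBUG(srw_lock_debug, srw_lock) latch;
};
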
parent 8d54d173
@@ -960,9 +960,6 @@ void dict_sys_t::lock_wait(SRW_LOCK_ARGS(const char *file, unsigned line))
{
latch.wr_lock(SRW_LOCK_ARGS(file, line));
latch_ex_wait_start.store(0, std::memory_order_relaxed);
ut_ad(!latch_readers);
ut_ad(!latch_ex);
ut_d(latch_ex= pthread_self());
return;
}
@@ -978,31 +975,21 @@ void dict_sys_t::lock_wait(SRW_LOCK_ARGS(const char *file, unsigned line))
ib::warn() << "A long wait (" << waited
<< " seconds) was observed for dict_sys.latch";
latch.wr_lock(SRW_LOCK_ARGS(file, line));
ut_ad(!latch_readers);
ut_ad(!latch_ex);
ut_d(latch_ex= pthread_self());
}
#ifdef UNIV_PFS_RWLOCK
ATTRIBUTE_NOINLINE void dict_sys_t::unlock()
{
ut_ad(latch_ex == pthread_self());
ut_ad(!latch_readers);
ut_d(latch_ex= 0);
latch.wr_unlock();
}
ATTRIBUTE_NOINLINE void dict_sys_t::freeze(const char *file, unsigned line)
{
latch.rd_lock(file, line);
ut_ad(!latch_ex);
ut_d(latch_readers++);
}
ATTRIBUTE_NOINLINE void dict_sys_t::unfreeze()
{
ut_ad(!latch_ex);
ut_ad(latch_readers--);
latch.rd_unlock();
}
#endif /* UNIV_PFS_RWLOCK */
@@ -1317,13 +1317,7 @@ class dict_sys_t
std::atomic<ulonglong> latch_ex_wait_start;
/** the rw-latch protecting the data dictionary cache */
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_lock latch;
#ifdef UNIV_DEBUG
/** whether latch is being held in exclusive mode (by any thread) */
Atomic_relaxed<pthread_t> latch_ex;
/** number of S-latch holders */
Atomic_counter<uint32_t> latch_readers;
#endif
alignas(CPU_LEVEL1_DCACHE_LINESIZE) IF_DBUG(srw_lock_debug,srw_lock) latch;
public:
/** Indexes of SYS_TABLE[] */
enum
@@ -1491,15 +1485,12 @@ class dict_sys_t
}
#ifdef UNIV_DEBUG
/** @return whether any thread (not necessarily the current thread)
is holding the latch; that is, this check may return false
positives */
bool frozen() const { return latch_readers || latch_ex; }
/** @return whether any thread (not necessarily the current thread)
is holding a shared latch */
bool frozen_not_locked() const { return latch_readers; }
/** @return whether the current thread is holding the latch */
bool frozen() const { return latch.have_any(); }
/** @return whether the current thread is holding a shared latch */
bool frozen_not_locked() const { return latch.have_rd(); }
/** @return whether the current thread holds the exclusive latch */
bool locked() const { return latch_ex == pthread_self(); }
bool locked() const { return latch.have_wr(); }
#endif
private:
/** Acquire the exclusive latch */
@@ -1514,13 +1505,7 @@ class dict_sys_t
/** Exclusively lock the dictionary cache. */
void lock(SRW_LOCK_ARGS(const char *file, unsigned line))
{
if (latch.wr_lock_try())
{
ut_ad(!latch_readers);
ut_ad(!latch_ex);
ut_d(latch_ex= pthread_self());
}
else
if (!latch.wr_lock_try())
lock_wait(SRW_LOCK_ARGS(file, line));
}
@@ -1533,27 +1518,11 @@ class dict_sys_t
ATTRIBUTE_NOINLINE void unfreeze();
#else
/** Unlock the data dictionary cache. */
void unlock()
{
ut_ad(latch_ex == pthread_self());
ut_ad(!latch_readers);
ut_d(latch_ex= 0);
latch.wr_unlock();
}
void unlock() { latch.wr_unlock(); }
/** Acquire a shared lock on the dictionary cache. */
void freeze()
{
latch.rd_lock();
ut_ad(!latch_ex);
ut_d(latch_readers++);
}
void freeze() { latch.rd_lock(); }
/** Release a shared lock on the dictionary cache. */
void unfreeze()
{
ut_ad(!latch_ex);
ut_ad(latch_readers--);
latch.rd_unlock();
}
void unfreeze() { latch.rd_unlock(); }
#endif
/** Estimate the used memory occupied by the data dictionary
@@ -347,7 +347,6 @@ struct fil_space_t final
~fil_space_t()
{
ut_ad(!latch_owner);
ut_ad(!latch_count);
latch.destroy();
}
@@ -414,9 +413,9 @@ struct fil_space_t final
/** The reference count */
static constexpr uint32_t PENDING= ~(STOPPING | CLOSING | NEEDS_FSYNC);
/** latch protecting all page allocation bitmap pages */
srw_lock latch;
IF_DBUG(srw_lock_debug, srw_lock) latch;
/** the thread that holds the exclusive latch, or 0 */
pthread_t latch_owner;
ut_d(Atomic_relaxed<uint32_t> latch_count;)
public:
/** MariaDB encryption data */
fil_space_crypt_t *crypt_data;
@@ -1065,40 +1064,32 @@ struct fil_space_t final
bool recheck, bool encrypt);
#ifdef UNIV_DEBUG
bool is_latched() const { return latch_count != 0; }
bool is_latched() const { return latch.have_any(); }
#endif
bool is_owner() const { return latch_owner == pthread_self(); }
bool is_owner() const
{
const bool owner{latch_owner == pthread_self()};
ut_ad(owner == latch.have_wr());
return owner;
}
/** Acquire the allocation latch in exclusive mode */
void x_lock()
{
latch.wr_lock(SRW_LOCK_CALL);
ut_ad(!latch_owner);
latch_owner= pthread_self();
ut_ad(!latch_count.fetch_add(1));
}
/** Release the allocation latch from exclusive mode */
void x_unlock()
{
ut_ad(latch_count.fetch_sub(1) == 1);
ut_ad(latch_owner == pthread_self());
latch_owner= 0;
latch.wr_unlock();
}
/** Acquire the allocation latch in shared mode */
void s_lock()
{
ut_ad(!is_owner());
latch.rd_lock(SRW_LOCK_CALL);
ut_ad(!latch_owner);
ut_d(latch_count.fetch_add(1));
}
void s_lock() { latch.rd_lock(SRW_LOCK_CALL); }
/** Release the allocation latch from shared mode */
void s_unlock()
{
ut_ad(latch_count.fetch_sub(1));
ut_ad(!latch_owner);
latch.rd_unlock();
}
void s_unlock() { latch.rd_unlock(); }
typedef span<const char> name_type;
@@ -724,13 +724,8 @@ class lock_sys_t
bool m_initialised;
/** mutex protecting the locks */
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
#ifdef UNIV_DEBUG
/** The owner of exclusive latch (0 if none); protected by latch */
std::atomic<pthread_t> writer{0};
/** Number of shared latches */
std::atomic<ulint> readers{0};
#endif
alignas(CPU_LEVEL1_DCACHE_LINESIZE)
IF_DBUG(srw_lock_debug,srw_spin_lock) latch;
#ifdef SUX_LOCK_GENERIC
protected:
/** mutex for hash_latch::wait() */
@@ -789,71 +784,35 @@ class lock_sys_t
void wr_lock()
{
mysql_mutex_assert_not_owner(&wait_mutex);
ut_ad(!is_writer());
latch.wr_lock();
ut_ad(!writer.exchange(pthread_self(),
std::memory_order_relaxed));
}
/** Release exclusive lock_sys.latch */
void wr_unlock()
{
ut_ad(writer.exchange(0, std::memory_order_relaxed) ==
pthread_self());
latch.wr_unlock();
}
void wr_unlock() { latch.wr_unlock(); }
/** Acquire shared lock_sys.latch */
void rd_lock()
{
mysql_mutex_assert_not_owner(&wait_mutex);
ut_ad(!is_writer());
latch.rd_lock();
ut_ad(!writer.load(std::memory_order_relaxed));
ut_d(readers.fetch_add(1, std::memory_order_relaxed));
}
/** Release shared lock_sys.latch */
void rd_unlock()
{
ut_ad(!is_writer());
ut_ad(readers.fetch_sub(1, std::memory_order_relaxed));
latch.rd_unlock();
}
void rd_unlock() { latch.rd_unlock(); }
#endif
/** Try to acquire exclusive lock_sys.latch
@return whether the latch was acquired */
bool wr_lock_try()
{
ut_ad(!is_writer());
if (!latch.wr_lock_try()) return false;
ut_ad(!writer.exchange(pthread_self(),
std::memory_order_relaxed));
return true;
}
bool wr_lock_try() { return latch.wr_lock_try(); }
/** Try to acquire shared lock_sys.latch
@return whether the latch was acquired */
bool rd_lock_try()
{
ut_ad(!is_writer());
if (!latch.rd_lock_try()) return false;
ut_ad(!writer.load(std::memory_order_relaxed));
ut_d(readers.fetch_add(1, std::memory_order_relaxed));
return true;
}
bool rd_lock_try() { return latch.rd_lock_try(); }
/** Assert that wr_lock() has been invoked by this thread */
void assert_locked() const { ut_ad(is_writer()); }
void assert_locked() const { ut_ad(latch.have_wr()); }
/** Assert that wr_lock() has not been invoked by this thread */
void assert_unlocked() const { ut_ad(!is_writer()); }
void assert_unlocked() const { ut_ad(!latch.have_wr()); }
#ifdef UNIV_DEBUG
/** @return whether the current thread is the lock_sys.latch writer */
bool is_writer() const
{
# ifdef SUX_LOCK_GENERIC
return writer.load(std::memory_order_relaxed) == pthread_self();
# else
return writer.load(std::memory_order_relaxed) == pthread_self() ||
(xtest() && !latch.is_locked_or_waiting());
# endif
}
bool is_writer() const { return latch.have_wr(); }
/** @return whether the current thread is holding lock_sys.latch */
bool is_holder() const { return latch.have_any(); }
/** Assert that a lock shard is exclusively latched (by some thread) */
void assert_locked(const lock_t &lock) const;
/** Assert that a table lock shard is exclusively latched by this thread */
@@ -965,14 +924,14 @@ extern lock_sys_t lock_sys;
/** @return the index of an array element */
inline ulint lock_sys_t::hash_table::calc_hash(ulint fold) const
{
ut_ad(lock_sys.is_writer() || lock_sys.readers);
ut_ad(lock_sys.is_holder());
return calc_hash(fold, n_cells);
}
/** Get a hash table cell. */
inline hash_cell_t *lock_sys_t::hash_table::cell_get(ulint fold) const
{
ut_ad(lock_sys.is_writer() || lock_sys.readers);
ut_ad(lock_sys.is_holder());
return &array[calc_hash(fold)];
}
@@ -155,7 +155,7 @@ template<bool spinloop> class srw_lock_impl;
/** Slim shared-update-exclusive lock with no recursion */
template<bool spinloop>
class ssux_lock_impl final
class ssux_lock_impl
{
#ifdef UNIV_PFS_RWLOCK
friend class ssux_lock;
@@ -552,3 +552,51 @@ typedef srw_lock_impl<false> srw_lock;
typedef srw_lock_impl<true> srw_spin_lock;
#endif
#ifdef UNIV_DEBUG
# include <unordered_set>
class srw_lock_debug : private srw_lock
{
/** The owner of the exclusive lock (0 if none) */
std::atomic<pthread_t> writer;
/** Protects readers */
mutable srw_mutex readers_lock;
/** Threads that hold the lock in shared mode */
std::atomic<std::unordered_multiset<pthread_t>*> readers;
/** Register a read lock. */
void readers_register();
public:
void SRW_LOCK_INIT(mysql_pfs_key_t key);
void destroy();
#ifndef SUX_LOCK_GENERIC
/** @return whether any lock may be held by any thread */
bool is_locked_or_waiting() const noexcept
{ return srw_lock::is_locked_or_waiting(); }
/** @return whether an exclusive lock may be held by any thread */
bool is_write_locked() const noexcept { return srw_lock::is_write_locked(); }
#endif
/** Acquire an exclusive lock */
void wr_lock(SRW_LOCK_ARGS(const char *file, unsigned line));
/** @return whether an exclusive lock was acquired */
bool wr_lock_try();
/** Release after wr_lock() */
void wr_unlock();
/** Acquire a shared lock */
void rd_lock(SRW_LOCK_ARGS(const char *file, unsigned line));
/** @return whether a shared lock was acquired */
bool rd_lock_try();
/** Release after rd_lock() */
void rd_unlock();
/** @return whether this thread is between rd_lock() and rd_unlock() */
bool have_rd() const noexcept;
/** @return whether this thread is between wr_lock() and wr_unlock() */
bool have_wr() const noexcept;
/** @return whether this thread is holding rd_lock() or wr_lock() */
bool have_any() const noexcept;
};
#endif
@@ -59,7 +59,7 @@ struct alignas(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t
/** tablespace containing the rollback segment; constant after init() */
fil_space_t *space;
/** latch protecting everything except page_no, space */
srw_spin_lock latch;
IF_DBUG(srw_lock_debug,srw_spin_lock) latch;
/** rollback segment header page number; constant after init() */
uint32_t page_no;
/** length of the TRX_RSEG_HISTORY list (number of transactions) */
@@ -173,7 +173,7 @@ void lock_sys_t::assert_locked(const dict_table_t &table) const
ut_ad(!table.is_temporary());
if (is_writer())
return;
ut_ad(readers);
ut_ad(latch.have_rd());
ut_ad(table.lock_mutex_is_owner());
}
@@ -182,7 +182,7 @@ void lock_sys_t::hash_table::assert_locked(const page_id_t id) const
{
if (lock_sys.is_writer())
return;
ut_ad(lock_sys.readers);
ut_ad(lock_sys.is_holder());
ut_ad(latch(cell_get(id.fold()))->is_locked());
}
@@ -191,7 +191,7 @@ void lock_sys_t::assert_locked(const hash_cell_t &cell) const
{
if (is_writer())
return;
ut_ad(lock_sys.readers);
ut_ad(lock_sys.is_holder());
ut_ad(hash_table::latch(const_cast<hash_cell_t*>(&cell))->is_locked());
}
#endif
@@ -426,13 +426,10 @@ void lock_sys_t::wr_lock(const char *file, unsigned line)
{
mysql_mutex_assert_not_owner(&wait_mutex);
latch.wr_lock(file, line);
ut_ad(!writer.exchange(pthread_self(), std::memory_order_relaxed));
}
/** Release exclusive lock_sys.latch */
void lock_sys_t::wr_unlock()
{
ut_ad(writer.exchange(0, std::memory_order_relaxed) ==
pthread_self());
latch.wr_unlock();
}
@@ -441,15 +438,11 @@ void lock_sys_t::rd_lock(const char *file, unsigned line)
{
mysql_mutex_assert_not_owner(&wait_mutex);
latch.rd_lock(file, line);
ut_ad(!writer.load(std::memory_order_relaxed));
ut_d(readers.fetch_add(1, std::memory_order_relaxed));
}
/** Release shared lock_sys.latch */
void lock_sys_t::rd_unlock()
{
ut_ad(!writer.load(std::memory_order_relaxed));
ut_ad(readers.fetch_sub(1, std::memory_order_relaxed));
latch.rd_unlock();
}
#endif
@@ -261,9 +261,7 @@ void mtr_t::rollback_to_savepoint(ulint begin, ulint end)
/** Set create_lsn. */
inline void fil_space_t::set_create_lsn(lsn_t lsn)
{
#ifndef SUX_LOCK_GENERIC
ut_ad(latch.is_write_locked());
#endif
ut_ad(latch.have_wr());
/* Concurrent log_checkpoint_low() must be impossible. */
mysql_mutex_assert_owner(&log_sys.mutex);
create_lsn= lsn;
@@ -548,3 +548,120 @@ template void ssux_lock_impl<false>::rd_unlock();
template void ssux_lock_impl<false>::u_unlock();
template void ssux_lock_impl<false>::wr_unlock();
#endif /* UNIV_PFS_RWLOCK */
#ifdef UNIV_DEBUG
void srw_lock_debug::SRW_LOCK_INIT(mysql_pfs_key_t key)
{
srw_lock::SRW_LOCK_INIT(key);
readers_lock.init();
ut_ad(!readers.load(std::memory_order_relaxed));
ut_ad(!have_any());
}
void srw_lock_debug::destroy()
{
ut_ad(!writer);
if (auto r= readers.load(std::memory_order_relaxed))
{
readers.store(0, std::memory_order_relaxed);
ut_ad(r->empty());
delete r;
}
srw_lock::destroy();
}
bool srw_lock_debug::wr_lock_try()
{
ut_ad(!have_any());
if (!srw_lock::wr_lock_try())
return false;
ut_ad(!writer);
writer.store(pthread_self(), std::memory_order_relaxed);
return true;
}
void srw_lock_debug::wr_lock(SRW_LOCK_ARGS(const char *file, unsigned line))
{
ut_ad(!have_any());
srw_lock::wr_lock(SRW_LOCK_ARGS(file, line));
ut_ad(!writer);
writer.store(pthread_self(), std::memory_order_relaxed);
}
void srw_lock_debug::wr_unlock()
{
ut_ad(have_wr());
writer.store(0, std::memory_order_relaxed);
srw_lock::wr_unlock();
}
void srw_lock_debug::readers_register()
{
readers_lock.wr_lock();
auto r= readers.load(std::memory_order_relaxed);
if (!r)
{
r= new std::unordered_multiset<pthread_t>();
readers.store(r, std::memory_order_relaxed);
}
r->emplace(pthread_self());
readers_lock.wr_unlock();
}
bool srw_lock_debug::rd_lock_try()
{
ut_ad(!have_any());
if (!srw_lock::rd_lock_try())
return false;
readers_register();
return true;
}
void srw_lock_debug::rd_lock(SRW_LOCK_ARGS(const char *file, unsigned line))
{
ut_ad(!have_any());
srw_lock::rd_lock(SRW_LOCK_ARGS(file, line));
readers_register();
}
void srw_lock_debug::rd_unlock()
{
const pthread_t self= pthread_self();
ut_ad(writer != self);
readers_lock.wr_lock();
auto r= readers.load(std::memory_order_relaxed);
ut_ad(r);
auto i= r->find(self);
ut_ad(i != r->end());
r->erase(i);
readers_lock.wr_unlock();
srw_lock::rd_unlock();
}
bool srw_lock_debug::have_rd() const noexcept
{
if (auto r= readers.load(std::memory_order_relaxed))
{
readers_lock.wr_lock();
bool found= r->find(pthread_self()) != r->end();
readers_lock.wr_unlock();
ut_ad(!found || is_locked());
return found;
}
return false;
}
bool srw_lock_debug::have_wr() const noexcept
{
if (writer != pthread_self())
return false;
ut_ad(is_write_locked());
return true;
}
bool srw_lock_debug::have_any() const noexcept
{
return have_wr() || have_rd();
}
#endif
@@ -395,9 +395,7 @@ static void trx_purge_free_segment(buf_block_t *rseg_hdr, buf_block_t *block,
void purge_sys_t::rseg_enable(trx_rseg_t &rseg)
{
ut_ad(this == &purge_sys);
#ifndef SUX_LOCK_GENERIC
ut_ad(rseg.latch.is_write_locked());
#endif
ut_ad(rseg.latch.have_wr());
uint8_t skipped= skipped_rseg;
ut_ad(skipped < TRX_SYS_N_RSEGS);
if (&rseg == &trx_sys.rseg_array[skipped])
@@ -873,9 +871,7 @@ void purge_sys_t::rseg_get_next_history_log()
{
fil_addr_t prev_log_addr;
#ifndef SUX_LOCK_GENERIC
ut_ad(rseg->latch.is_write_locked());
#endif
ut_ad(rseg->latch.have_wr());
ut_a(rseg->last_page_no != FIL_NULL);
tail.trx_no= rseg->last_trx_no() + 1;
@@ -991,9 +987,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr)
{
ut_ad(next_stored);
ut_ad(tail.trx_no < low_limit_no());
#ifndef SUX_LOCK_GENERIC
ut_ad(rseg->latch.is_write_locked());
#endif
ut_ad(rseg->latch.have_wr());
if (!offset)
{