Commit 8751aa73 authored by Marko Mäkelä

MDEV-25404: ssux_lock_low: Introduce a separate writer mutex

Having both readers and writers use a single lock word in
futex system calls caused a performance regression compared to
SRW_LOCK_DUMMY (a mutex and 2 condition variables).
A contributing factor is that we did not accurately keep
track of the number of waiting threads and thus had to invoke
system calls to wake up any waiting threads.

SUX_LOCK_GENERIC: Renamed from SRW_LOCK_DUMMY. This is the
original implementation, with rw_lock (std::atomic<uint32_t>),
a mutex and two condition variables. Using a separate writer
mutex (as described below) is not possible, because the mutex ownership
in a buf_block_t::lock must be able to transfer from a write submitter
thread to an I/O completion thread, and pthread_mutex_lock() may assume
that the submitter thread is recursively acquiring the mutex that it
already holds, while in reality the I/O completion thread is the real
owner. POSIX does not define an interface for requesting a mutex to
be non-recursive.

On Microsoft Windows, srw_lock_low will remain a simple wrapper of
SRWLOCK. On 32-bit Microsoft Windows, sizeof(SRWLOCK)=4 while
sizeof(srw_lock_low)=8.

On other platforms, srw_lock_low is an alias of ssux_lock_low,
the Simple (non-recursive) Shared/Update/eXclusive lock.

In the futex-based implementation of ssux_lock_low (Linux, OpenBSD,
Microsoft Windows), we shall use a dedicated mutex for exclusive
requests (writer), and have a WRITER flag in the 'readers' lock word
to indicate that a writer is holding the lock or waiting for the lock to
be granted. When the WRITER flag is set, all lock requests must acquire
the writer mutex. Normally, shared (S) lock requests simply perform a
compare-and-swap on the 'readers' word.
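
The following is a minimal stand-in sketch of the layout and fast paths
described above, not the committed code: std::mutex replaces the
futex-based srw_mutex and busy-waiting replaces the futex wait, while the
member and flag names (writer, readers, WRITER) mirror the ssux_lock_low
definition in the diff below.

  #include <atomic>
  #include <cstdint>
  #include <mutex>

  class ssux_sketch
  {
    std::mutex writer;                 // held by any U or X holder or waiter
    std::atomic<uint32_t> readers{0};  // S/U count; WRITER flag marks X
    static constexpr uint32_t WRITER= 1U << 31;
  public:
    bool rd_lock_try()                 // S: plain compare-and-swap on 'readers'
    {
      uint32_t lk= 0;
      while (!readers.compare_exchange_weak(lk, lk + 1,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed))
        if (lk & WRITER)
          return false;                // X holder or waiter exists: slow path
      return true;
    }
    void rd_unlock() { readers.fetch_sub(1, std::memory_order_release); }
    void wr_lock()                     // X: take the mutex, then announce WRITER
    {
      writer.lock();
      uint32_t lk= readers.fetch_or(WRITER, std::memory_order_acquire);
      while (lk)                       // busy-wait for S holders to drain
        lk= readers.load(std::memory_order_acquire) & ~WRITER;
    }
    void wr_unlock()
    {
      readers.store(0, std::memory_order_release);
      writer.unlock();
    }
  };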

Update locks are implemented as a combination of the writer mutex
and a normal counter in the 'readers' lock word. The conflict between
U and X locks is guaranteed by the writer mutex.
Unlike in SUX_LOCK_GENERIC, wr_u_downgrade() will not wake up any pending
rd_lock() waits; they will wait until u_unlock() releases the writer mutex.
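
Continuing the same stand-in sketch (std::mutex and busy-waiting instead of
srw_mutex and futex waits; only the method names follow the diff below),
the U operations and the downgrade rule could read as follows:

    void u_lock()                      // U: writer mutex plus one 'readers' count
    {
      writer.lock();
      readers.fetch_add(1, std::memory_order_acquire);
    }
    void u_wr_upgrade()                // U->X: we already hold 'writer'
    {
      uint32_t lk= readers.fetch_or(WRITER, std::memory_order_acquire);
      while (lk != 1)                  // busy-wait for the other S holders
        lk= readers.load(std::memory_order_acquire) & ~WRITER;
      readers.fetch_sub(1, std::memory_order_relaxed); // drop our own U count
    }
    void wr_u_downgrade()              // X->U: keep 'writer', let S proceed again
    {
      readers.store(1, std::memory_order_release);
      // in the real futex-based code, pending rd_lock() waiters are not
      // woken here; they proceed only after u_unlock() releases 'writer'
    }
    void u_unlock()
    {
      readers.fetch_sub(1, std::memory_order_release);
      writer.unlock();
    }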

ssux_lock_low is always wrapped by sux_lock (which adds a recursion count
of U and X locks) and is used for dict_index_t::lock and buf_block_t::lock.
Their memory footprint in the futex-based implementation will increase
by sizeof(srw_mutex), that is, by 4 bytes.
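
As a footprint illustration (assuming the futex-based build, where the new
srw_mutex is a single 32-bit futex word), the growth amounts to exactly one
extra word per ssux_lock_low; the structs below are hypothetical before/after
stand-ins, not the real class definitions:

  #include <atomic>
  #include <cstdint>

  struct ssux_old { std::atomic<uint32_t> word; };             // rw_lock word only
  struct ssux_new { std::atomic<uint32_t> writer, readers; };  // srw_mutex + readers
  static_assert(sizeof(ssux_new) - sizeof(ssux_old) == 4,
                "the footprint grows by sizeof(srw_mutex)");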

This change addresses a performance regression in read-only benchmarks,
such as sysbench oltp_read_only. Write performance was also improved.

On 32-bit Linux and OpenBSD, lock_sys_t::hash_table will allocate
two hash table elements for each srw_lock (14 instead of 15 hash
table cells per 64-byte cache line on IA-32). On Microsoft Windows,
sizeof(SRWLOCK)==sizeof(void*) and there is no change.
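
For reference, the cache-line arithmetic behind these figures, assuming
CPU_LEVEL1_DCACHE_LINESIZE == 64 and using the constant names from the
hash_table changes in the diff below:

  #include <cstddef>

  // 32-bit futex build: sizeof(void*) == 4, sizeof(hash_latch) == 8
  constexpr size_t LATCH_32= (4 >= 8) ? 1 : 2;                 // = 2
  constexpr size_t ELEMENTS_PER_LATCH_32= 64 / 4 - LATCH_32;   // = 14 payload cells
  static_assert((LATCH_32 + ELEMENTS_PER_LATCH_32) * 4 == 64, "16 cells per line");

  // 64-bit builds (and Windows, where sizeof(SRWLOCK) == sizeof(void*)):
  constexpr size_t LATCH_64= (8 >= 8) ? 1 : 2;                 // = 1
  constexpr size_t ELEMENTS_PER_LATCH_64= 64 / 8 - LATCH_64;   // = 7 payload cells
  static_assert((LATCH_64 + ELEMENTS_PER_LATCH_64) * 8 == 64, "8 cells per line");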

Reviewed by: Vladislav Vaintroub
Tested by: Axel Schwenke and Vladislav Vaintroub
parent 040c16ab
@@ -548,7 +548,7 @@ class lock_sys_t
/** Hash table latch */
struct hash_latch
#if defined SRW_LOCK_DUMMY && !defined _WIN32
#ifdef SUX_LOCK_GENERIC
: private rw_lock
{
/** Wait for an exclusive lock */
@@ -577,15 +577,18 @@ class lock_sys_t
{ return memcmp(this, field_ref_zero, sizeof *this); }
#endif
};
static_assert(sizeof(hash_latch) <= sizeof(void*), "compatibility");
public:
struct hash_table
{
/** Number of consecutive array[] elements occupied by a hash_latch */
static constexpr size_t LATCH= sizeof(void*) >= sizeof(hash_latch) ? 1 : 2;
static_assert(sizeof(hash_latch) <= LATCH * sizeof(void*), "allocation");
/** Number of array[] elements per hash_latch.
Must be one less than a power of 2. */
Must be LATCH less than a power of 2. */
static constexpr size_t ELEMENTS_PER_LATCH= CPU_LEVEL1_DCACHE_LINESIZE /
sizeof(void*) - 1;
sizeof(void*) - LATCH;
/** number of payload elements in array[]. Protected by lock_sys.latch. */
ulint n_cells;
@@ -608,11 +611,13 @@ class lock_sys_t
/** @return the index of an array element */
inline ulint calc_hash(ulint fold) const;
/** @return raw array index converted to padded index */
static ulint pad(ulint h) { return 1 + (h / ELEMENTS_PER_LATCH) + h; }
static ulint pad(ulint h)
{ return LATCH + LATCH * (h / ELEMENTS_PER_LATCH) + h; }
/** Get a latch. */
static hash_latch *latch(hash_cell_t *cell)
{
void *l= ut_align_down(cell, (ELEMENTS_PER_LATCH + 1) * sizeof *cell);
void *l= ut_align_down(cell, sizeof *cell *
(ELEMENTS_PER_LATCH + LATCH));
return static_cast<hash_latch*>(l);
}
/** Get a hash table cell. */
@@ -646,7 +651,7 @@ class lock_sys_t
/** Number of shared latches */
std::atomic<ulint> readers{0};
#endif
#if defined SRW_LOCK_DUMMY && !defined _WIN32
#ifdef SUX_LOCK_GENERIC
protected:
/** mutex for hash_latch::wait() */
pthread_mutex_t hash_mutex;
@@ -20,7 +20,17 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <atomic>
#include "my_dbug.h"
#if !(defined __linux__ || defined __OpenBSD__ || defined _WIN32)
# define SUX_LOCK_GENERIC
#elif 0 // defined SAFE_MUTEX
# define SUX_LOCK_GENERIC /* Use dummy implementation for debugging purposes */
#endif
#ifdef SUX_LOCK_GENERIC
/** Simple read-update-write lock based on std::atomic */
#else
/** Simple read-write lock based on std::atomic */
#endif
class rw_lock
{
/** The lock word */
@@ -35,8 +45,10 @@ class rw_lock
static constexpr uint32_t WRITER_WAITING= 1U << 30;
/** Flag to indicate that write_lock() or write_lock_wait() is pending */
static constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING;
#ifdef SUX_LOCK_GENERIC
/** Flag to indicate that an update lock exists */
static constexpr uint32_t UPDATER= 1U << 29;
#endif /* SUX_LOCK_GENERIC */
/** Start waiting for an exclusive lock.
@return current value of the lock word */
@@ -54,7 +66,9 @@ class rw_lock
@tparam prioritize_updater whether to ignore WRITER_WAITING for UPDATER
@param l the value of the lock word
@return whether the lock was acquired */
#ifdef SUX_LOCK_GENERIC
template<bool prioritize_updater= false>
#endif /* SUX_LOCK_GENERIC */
bool read_trylock(uint32_t &l)
{
l= UNLOCKED;
@@ -62,14 +76,19 @@ class rw_lock
std::memory_order_relaxed))
{
DBUG_ASSERT(!(WRITER & l) || !(~WRITER_PENDING & l));
#ifdef SUX_LOCK_GENERIC
DBUG_ASSERT((~(WRITER_PENDING | UPDATER) & l) < UPDATER);
if (prioritize_updater
? (WRITER & l) || ((WRITER_WAITING | UPDATER) & l) == WRITER_WAITING
: (WRITER_PENDING & l))
#else /* SUX_LOCK_GENERIC */
if (l & WRITER_PENDING)
#endif /* SUX_LOCK_GENERIC */
return false;
}
return true;
}
#ifdef SUX_LOCK_GENERIC
/** Try to acquire an update lock.
@param l the value of the lock word
@return whether the lock was acquired */
@@ -116,6 +135,7 @@ class rw_lock
lock.fetch_xor(WRITER | UPDATER, std::memory_order_relaxed);
DBUG_ASSERT((l & ~WRITER_WAITING) == WRITER);
}
#endif /* SUX_LOCK_GENERIC */
/** Wait for an exclusive lock.
@return whether the exclusive lock was acquired */
@@ -141,10 +161,15 @@ class rw_lock
bool read_unlock()
{
auto l= lock.fetch_sub(1, std::memory_order_release);
#ifdef SUX_LOCK_GENERIC
DBUG_ASSERT(~(WRITER_PENDING | UPDATER) & l); /* at least one read lock */
#else /* SUX_LOCK_GENERIC */
DBUG_ASSERT(~(WRITER_PENDING) & l); /* at least one read lock */
#endif /* SUX_LOCK_GENERIC */
DBUG_ASSERT(!(l & WRITER)); /* no write lock must have existed */
return (~WRITER_PENDING & l) == 1;
}
#ifdef SUX_LOCK_GENERIC
/** Release an update lock */
void update_unlock()
{
@@ -153,13 +178,18 @@ class rw_lock
/* the update lock must have existed */
DBUG_ASSERT((l & (WRITER | UPDATER)) == UPDATER);
}
#endif /* SUX_LOCK_GENERIC */
/** Release an exclusive lock */
void write_unlock()
{
IF_DBUG_ASSERT(auto l=,)
lock.fetch_and(~WRITER, std::memory_order_release);
/* the write lock must have existed */
#ifdef SUX_LOCK_GENERIC
DBUG_ASSERT((l & (WRITER | UPDATER)) == WRITER);
#else /* SUX_LOCK_GENERIC */
DBUG_ASSERT(l & WRITER);
#endif /* SUX_LOCK_GENERIC */
}
/** Try to acquire a shared lock.
@return whether the lock was acquired */
@@ -176,9 +206,11 @@ class rw_lock
/** @return whether an exclusive lock is being held by any thread */
bool is_write_locked() const
{ return !!(lock.load(std::memory_order_relaxed) & WRITER); }
#ifdef SUX_LOCK_GENERIC
/** @return whether an update lock is being held by any thread */
bool is_update_locked() const
{ return !!(lock.load(std::memory_order_relaxed) & UPDATER); }
#endif /* SUX_LOCK_GENERIC */
/** @return whether a shared lock is being held by any thread */
bool is_read_locked() const
{
@@ -18,14 +18,9 @@ this program; if not, write to the Free Software Foundation, Inc.,
#pragma once
#include "univ.i"
#include "rw_lock.h"
#if !(defined __linux__ || defined __OpenBSD__)
# define SRW_LOCK_DUMMY
#elif 0 // defined SAFE_MUTEX
# define SRW_LOCK_DUMMY /* Use dummy implementation for debugging purposes */
#endif
#if defined SRW_LOCK_DUMMY
#ifdef SUX_LOCK_GENERIC
/** An exclusive-only variant of srw_lock */
class srw_mutex final
{
@@ -85,25 +80,25 @@ class srw_mutex final
};
#endif
#include "rw_lock.h"
/** Slim shared-update-exclusive lock with no recursion */
class ssux_lock_low final : private rw_lock
class ssux_lock_low final
#ifdef SUX_LOCK_GENERIC
: private rw_lock
#endif
{
#ifdef UNIV_PFS_RWLOCK
friend class ssux_lock;
# if defined SRW_LOCK_DUMMY || defined _WIN32
# ifdef SUX_LOCK_GENERIC
# elif defined _WIN32
# else
friend class srw_lock;
# endif
#endif
#ifdef SRW_LOCK_DUMMY
#ifdef SUX_LOCK_GENERIC
pthread_mutex_t mutex;
pthread_cond_t cond_shared;
pthread_cond_t cond_exclusive;
#endif
/** @return pointer to the lock word */
rw_lock *word() { return static_cast<rw_lock*>(this); }
/** Wait for a read lock.
@param l lock word from a failed read_trylock() */
void read_lock(uint32_t l);
@@ -119,18 +114,14 @@ class ssux_lock_low final : private rw_lock
/** Wait for signal
@param l lock word from a failed acquisition */
inline void readers_wait(uint32_t l);
/** Send signal to one waiter */
inline void writer_wake();
/** Send signal to all waiters */
inline void readers_wake();
/** Wake waiters */
inline void wake();
public:
#ifdef SRW_LOCK_DUMMY
void init();
void destroy();
#else
void init() { DBUG_ASSERT(!is_locked_or_waiting()); }
void destroy() { DBUG_ASSERT(!is_locked_or_waiting()); }
#endif
/** @return whether any writer is waiting */
bool is_waiting() const { return (value() & WRITER_WAITING) != 0; }
bool rd_lock_try() { uint32_t l; return read_trylock(l); }
bool wr_lock_try() { return write_trylock(); }
void rd_lock() { uint32_t l; if (!read_trylock(l)) read_lock(l); }
@@ -142,18 +133,135 @@ class ssux_lock_low final : private rw_lock
void rd_unlock();
void u_unlock();
void wr_unlock();
#else
/** mutex for synchronization; held by U or X lock holders */
srw_mutex writer;
/** S or U holders, and WRITER flag for X holder or waiter */
std::atomic<uint32_t> readers;
/** indicates an X request; readers=WRITER indicates granted X lock */
static constexpr uint32_t WRITER= 1U << 31;
/** Wait for readers!=lk */
inline void wait(uint32_t lk);
/** Wait for readers!=lk|WRITER */
void wr_wait(uint32_t lk);
/** Wake up wait() on the last rd_unlock() */
void wake();
/** Acquire a read lock */
void rd_wait();
public:
void init() { DBUG_ASSERT(is_vacant()); }
void destroy() { DBUG_ASSERT(is_vacant()); }
/** @return whether any writer is waiting */
bool is_waiting() const { return value() & WRITER_WAITING; }
bool is_waiting() const
{ return (readers.load(std::memory_order_relaxed) & WRITER) != 0; }
# ifndef DBUG_OFF
/** @return whether the lock is being held or waited for */
bool is_vacant() const
{
return !readers.load(std::memory_order_relaxed) &&
!writer.is_locked_or_waiting();
}
# endif /* !DBUG_OFF */
bool rd_lock_try()
{
uint32_t lk= 0;
while (!readers.compare_exchange_weak(lk, lk + 1,
std::memory_order_acquire,
std::memory_order_relaxed))
if (lk & WRITER)
return false;
return true;
}
bool u_lock_try()
{
if (!writer.wr_lock_try())
return false;
IF_DBUG_ASSERT(uint32_t lk=,)
readers.fetch_add(1, std::memory_order_acquire);
DBUG_ASSERT(lk < WRITER - 1);
return true;
}
bool wr_lock_try()
{
if (!writer.wr_lock_try())
return false;
uint32_t lk= 0;
if (readers.compare_exchange_strong(lk, WRITER,
std::memory_order_acquire,
std::memory_order_relaxed))
return true;
writer.wr_unlock();
return false;
}
void rd_lock() { if (!rd_lock_try()) rd_wait(); }
void u_lock()
{
writer.wr_lock();
IF_DBUG_ASSERT(uint32_t lk=,)
readers.fetch_add(1, std::memory_order_acquire);
DBUG_ASSERT(lk < WRITER - 1);
}
void wr_lock()
{
writer.wr_lock();
if (uint32_t lk= readers.fetch_or(WRITER, std::memory_order_acquire))
wr_wait(lk);
}
void u_wr_upgrade()
{
DBUG_ASSERT(writer.is_locked());
uint32_t lk= 1;
if (!readers.compare_exchange_strong(lk, WRITER,
std::memory_order_acquire,
std::memory_order_relaxed))
wr_wait(lk);
}
void wr_u_downgrade()
{
DBUG_ASSERT(writer.is_locked());
DBUG_ASSERT(readers.load(std::memory_order_relaxed) == WRITER);
readers.store(1, std::memory_order_release);
/* Note: Any pending rd_lock() will not be woken up until u_unlock() */
}
void rd_unlock()
{
uint32_t lk= readers.fetch_sub(1, std::memory_order_release);
ut_ad(~WRITER & lk);
if (lk == WRITER + 1)
wake();
}
void u_unlock()
{
IF_DBUG_ASSERT(uint32_t lk=,)
readers.fetch_sub(1, std::memory_order_release);
DBUG_ASSERT(lk);
DBUG_ASSERT(lk < WRITER);
writer.wr_unlock();
}
void wr_unlock()
{
DBUG_ASSERT(readers.load(std::memory_order_relaxed) == WRITER);
readers.store(0, std::memory_order_release);
writer.wr_unlock();
}
#endif
};
#if defined SRW_LOCK_DUMMY || defined _WIN32
#ifdef _WIN32
/** Slim read-write lock */
class srw_lock_low
{
# ifdef UNIV_PFS_RWLOCK
friend class srw_lock;
# endif
# ifdef _WIN32
SRWLOCK lock;
public:
void init() {}
@@ -164,7 +272,14 @@ class srw_lock_low
void wr_lock() { AcquireSRWLockExclusive(&lock); }
bool wr_lock_try() { return TryAcquireSRWLockExclusive(&lock); }
void wr_unlock() { ReleaseSRWLockExclusive(&lock); }
# else
};
#elif defined SUX_LOCK_GENERIC
/** Slim read-write lock */
class srw_lock_low
{
# ifdef UNIV_PFS_RWLOCK
friend class srw_lock;
# endif
rw_lock_t lock;
public:
void init() { my_rwlock_init(&lock, nullptr); }
@@ -175,7 +290,6 @@ class srw_lock_low
void wr_lock() { rw_wrlock(&lock); }
bool wr_lock_try() { return !rw_trywrlock(&lock); }
void wr_unlock() { rw_unlock(&lock); }
# endif
};
#else
typedef ssux_lock_low srw_lock_low;
@@ -96,7 +96,8 @@ void lock_sys_t::hash_table::resize(ulint n)
{
if (lock_t *lock= static_cast<lock_t*>(array[i].node))
{
ut_ad(i % (ELEMENTS_PER_LATCH + 1)); /* all hash_latch must vacated */
/* all hash_latch must vacated */
ut_ad(i % (ELEMENTS_PER_LATCH + LATCH) >= LATCH);
do
{
ut_ad(!lock->is_table());
@@ -129,7 +130,7 @@ void lock_sys_t::hash_table::resize(ulint n)
n_cells= new_n_cells;
}
#if defined SRW_LOCK_DUMMY && !defined _WIN32
#ifdef SUX_LOCK_GENERIC
void lock_sys_t::hash_latch::wait()
{
pthread_mutex_lock(&lock_sys.hash_mutex);
@@ -371,7 +372,7 @@ void lock_sys_t::create(ulint n_cells)
latch.SRW_LOCK_INIT(lock_latch_key);
mysql_mutex_init(lock_wait_mutex_key, &wait_mutex, nullptr);
#if defined SRW_LOCK_DUMMY && !defined _WIN32
#ifdef SUX_LOCK_GENERIC
pthread_mutex_init(&hash_mutex, nullptr);
pthread_cond_init(&hash_cond, nullptr);
#endif
@@ -452,7 +453,7 @@ void lock_sys_t::close()
rec_hash.free();
prdt_hash.free();
prdt_page_hash.free();
#if defined SRW_LOCK_DUMMY && !defined _WIN32
#ifdef SUX_LOCK_GENERIC
pthread_mutex_destroy(&hash_mutex);
pthread_cond_destroy(&hash_cond);
#endif