Commit fdae71f8 authored by Marko Mäkelä

MDEV-26828 Spinning on buf_pool.page_hash is wasting CPU cycles

page_hash_latch: Only use the spinlock implementation on
SUX_LOCK_GENERIC platforms (those for which we do not implement
a futex-like interface). Use srw_spin_mutex on 32-bit systems
(except Microsoft Windows) to satisfy the size constraints.
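
The size constraint comes from buf_pool.page_hash storing each latch inside one pointer-sized cell of the hash array itself. A hedged sketch of that reasoning follows; the stand-in types and their sizes are illustrative assumptions, not code from this commit:

#include <atomic>
#include <stdint.h>

// Hypothetical stand-ins, not MariaDB source: one page_hash slot is one
// pointer wide, a futex-style rw-lock needs two 32-bit words (writer word
// plus reader count), and a plain mutex needs a single 32-bit word.
struct hash_cell_t { void *node; };
struct rw_latch_shape { std::atomic<uint32_t> writer, readers; };
struct mutex_shape { std::atomic<uint32_t> word; };

// On 64-bit targets the two-word rw-latch fits into one slot; on 32-bit
// targets only the one-word mutex does, which is why srw_spin_mutex is
// chosen there (32-bit Windows instead fits its pointer-sized SRWLOCK).
static_assert(sizeof(mutex_shape) <= sizeof(hash_cell_t),
              "a one-word latch fits in one page_hash cell on any platform");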

rw_lock::is_read_locked(): Remove. We will use the slightly
broader assertion is_locked().
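
The removed predicate and its broader replacement can be contrasted over the rw_lock word. A sketch assuming the flag layout visible in the rw_lock hunk below (the constant values are assumptions):

#include <stdint.h>

// Assumed layout: the low bits of the word count readers.
constexpr uint32_t WRITER= 1U << 31;
constexpr uint32_t WRITER_WAITING= 1U << 30;
constexpr uint32_t WRITER_PENDING= WRITER | WRITER_WAITING;

// The removed check: true only while readers hold the lock.
bool is_read_locked(uint32_t l)
{ return (l & ~WRITER_PENDING) && !(l & WRITER); }

// The broader check used instead: true for any holder, shared or
// exclusive, but not for a thread that is merely waiting.
bool is_locked(uint32_t l)
{ return (l & ~WRITER_WAITING) != 0; }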

srw_lock_: Implement is_locked(), is_write_locked() in a hacky
way for the Microsoft Windows SRWLOCK. This should be acceptable,
because we are only using these predicates in debug assertions
(or later, in lock elision), and false positives should not matter.
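
A hedged sketch of what such a predicate can look like (srwlock_is_locked is a hypothetical name; the only facts assumed are that SRWLOCK is a single pointer-sized word and that SRWLOCK_INIT zero-initializes it):

#ifdef _WIN32
# include <windows.h>
// A nonzero word means some thread holds or waits for the lock; a shared
// holder makes it nonzero too, which is the tolerable false-positive case.
inline bool srwlock_is_locked(const SRWLOCK &l) noexcept
{ return reinterpret_cast<const size_t&>(l) != 0; }
#endif
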
parent 5caff202
@@ -277,6 +277,7 @@ the read requests for the whole area.
 */
 
 #ifndef UNIV_INNOCHECKSUM
+# ifdef SUX_LOCK_GENERIC
 void page_hash_latch::read_lock_wait()
 {
   /* First, try busy spinning for a while. */
@@ -309,6 +310,7 @@ void page_hash_latch::write_lock_wait()
     std::this_thread::yield();
   while (!write_lock_poll());
 }
+# endif
 
 constexpr std::chrono::microseconds WAIT_FOR_READ(100);
 constexpr int WAIT_FOR_WRITE= 100;
@@ -2297,7 +2299,7 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
 #endif /* UNIV_DEBUG */
   }
-  ut_ad(hash_lock->is_read_locked());
+  ut_ad(hash_lock->is_locked());
 
   if (!bpage->zip.data)
   {
......
@@ -1551,7 +1551,10 @@ class buf_pool_t
     buf_page_t *bpage= page_hash_get_low(page_id, fold);
     if (!bpage || watch_is_sentinel(*bpage))
     {
-      latch->release<exclusive>();
+      if (exclusive)
+        latch->write_unlock();
+      else
+        latch->read_unlock();
       if (hash_lock)
         *hash_lock= nullptr;
       return watch ? bpage : nullptr;
@@ -1562,8 +1565,10 @@ class buf_pool_t
     if (hash_lock)
       *hash_lock= latch; /* to be released by the caller */
+    else if (exclusive)
+      latch->write_unlock();
     else
-      latch->release<exclusive>();
+      latch->read_unlock();
     return bpage;
   }
 public:
@@ -1785,7 +1790,10 @@ class buf_pool_t
   template<bool exclusive> page_hash_latch *lock(ulint fold)
   {
     page_hash_latch *latch= lock_get(fold, n_cells);
-    latch->acquire<exclusive>();
+    if (exclusive)
+      latch->write_lock();
+    else
+      latch->read_lock();
     return latch;
   }
@@ -2024,6 +2032,7 @@ class buf_pool_t
 /** The InnoDB buffer pool */
 extern buf_pool_t buf_pool;
 
+#ifdef SUX_LOCK_GENERIC
 inline void page_hash_latch::read_lock()
 {
   mysql_mutex_assert_not_owner(&buf_pool.mutex);
@@ -2036,6 +2045,7 @@ inline void page_hash_latch::write_lock()
   if (!write_trylock())
     write_lock_wait();
 }
+#endif /* SUX_LOCK_GENERIC */
 
 inline void buf_page_t::add_buf_fix_count(uint32_t count)
 {
......
@@ -178,6 +178,7 @@ enum rw_lock_type_t
 #include "sux_lock.h"
 
+#ifdef SUX_LOCK_GENERIC
 class page_hash_latch : public rw_lock
 {
 public:
@@ -190,23 +191,31 @@ class page_hash_latch : public rw_lock
   inline void read_lock();
   /** Acquire an exclusive lock */
   inline void write_lock();
-  /** Acquire a lock */
-  template<bool exclusive> void acquire()
-  {
-    if (exclusive)
-      write_lock();
-    else
-      read_lock();
-  }
-  /** Release a lock */
-  template<bool exclusive> void release()
-  {
-    if (exclusive)
-      write_unlock();
-    else
-      read_unlock();
-  }
 };
+#elif defined _WIN32 || SIZEOF_SIZE_T >= 8
+class page_hash_latch
+{
+  srw_spin_lock_low lock;
+public:
+  void read_lock() { lock.rd_lock(); }
+  void read_unlock() { lock.rd_unlock(); }
+  void write_lock() { lock.wr_lock(); }
+  void write_unlock() { lock.wr_unlock(); }
+  bool is_locked() const { return lock.is_locked(); }
+  bool is_write_locked() const { return lock.is_write_locked(); }
+};
+#else
+class page_hash_latch
+{
+  srw_spin_mutex lock;
+public:
+  void read_lock() { write_lock(); }
+  void read_unlock() { write_unlock(); }
+  void write_lock() { lock.wr_lock(); }
+  void write_unlock() { lock.wr_unlock(); }
+  bool is_locked() const { return lock.is_locked(); }
+  bool is_write_locked() const { return is_locked(); }
+};
+#endif
 
 #endif /* !UNIV_INNOCHECKSUM */
@@ -229,12 +229,6 @@ class rw_lock
   bool is_update_locked() const
   { return !!(lock.load(std::memory_order_relaxed) & UPDATER); }
 #endif /* SUX_LOCK_GENERIC */
-  /** @return whether a shared lock is being held by any thread */
-  bool is_read_locked() const
-  {
-    auto l= lock.load(std::memory_order_relaxed);
-    return (l & ~WRITER_PENDING) && !(l & WRITER);
-  }
   /** @return whether any lock is being held or waited for by any thread */
   bool is_locked_or_waiting() const
   { return lock.load(std::memory_order_relaxed) != 0; }
......
@@ -175,10 +175,7 @@ class ssux_lock_impl final
 # ifndef DBUG_OFF
   /** @return whether the lock is being held or waited for */
   bool is_vacant() const
-  {
-    return !readers.load(std::memory_order_relaxed) &&
-           !writer.is_locked_or_waiting();
-  }
+  { return !is_locked() && !writer.is_locked_or_waiting(); }
 # endif /* !DBUG_OFF */
 
   bool rd_lock_try()
@@ -250,7 +247,7 @@ class ssux_lock_impl final
   void wr_u_downgrade()
   {
     DBUG_ASSERT(writer.is_locked());
-    DBUG_ASSERT(readers.load(std::memory_order_relaxed) == WRITER);
+    DBUG_ASSERT(is_write_locked());
     readers.store(1, std::memory_order_release);
     /* Note: Any pending rd_lock() will not be woken up until u_unlock() */
   }
@@ -272,10 +269,16 @@ class ssux_lock_impl final
   }
   void wr_unlock()
   {
-    DBUG_ASSERT(readers.load(std::memory_order_relaxed) == WRITER);
+    DBUG_ASSERT(is_write_locked());
     readers.store(0, std::memory_order_release);
     writer.wr_unlock();
   }
+  /** @return whether an exclusive lock may be held by any thread */
+  bool is_write_locked() const noexcept
+  { return readers.load(std::memory_order_relaxed) == WRITER; }
+  /** @return whether any lock may be held by any thread */
+  bool is_locked() const noexcept
+  { return readers.load(std::memory_order_relaxed) != 0; }
 #endif
 };
@@ -308,6 +311,18 @@ class srw_lock_
   { return IF_WIN(TryAcquireSRWLockExclusive(&lock), !rw_trywrlock(&lock)); }
   void wr_unlock()
   { IF_WIN(ReleaseSRWLockExclusive(&lock), rw_unlock(&lock)); }
+#ifdef _WIN32
+  /** @return whether any lock may be held or waited for by any thread */
+  bool is_locked_or_waiting() const noexcept { return (size_t&)(lock) != 0; }
+  /** @return whether any lock may be held by any thread */
+  bool is_locked() const noexcept { return is_locked_or_waiting(); }
+  /** @return whether an exclusive lock may be held by any thread */
+  bool is_write_locked() const noexcept
+  {
+    // FIXME: this returns false positives for shared locks
+    return is_locked();
+  }
+#endif
 };
 
 template<> void srw_lock_<true>::rd_wait();
......