Commit fb7caad7 authored by Sergey Vojtovich

MDEV-11296 - InnoDB stalls under OLTP RW on P8

Simplify away the recursive flag: it is not necessary for rw-locks to operate
properly. writer_thread != 0 now takes over the role of the removed flag.

As we only need a correct value of writer_thread in the writer thread itself,
it is rather safe to load and update it non-atomically.

This patch also fixes a potential reordering of the "writer_thread" and
"recursive" loads (aka MDEV-7148), which was reopened along with the InnoDB
thread fence simplification. Previous versions are unaffected because they
have os_rmb in rw_lock_lock_word_decr(). The problem had not been observed
in practice at the time of writing, though.
parent 68a85373
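For illustration, here is a minimal, self-contained sketch of the protocol this
commit leaves behind. It is not InnoDB code: it uses standard C++ atomics and
std::thread::id instead of the my_atomic/os_thread_* wrappers, the names
mini_rw_lock_t, mini_x_lock_nowait() and mini_x_unlock() are made up, and the
sx-lock, pass and waiter handling are omitted. A default-constructed
std::thread::id plays the part of writer_thread == 0.

#include <atomic>
#include <thread>

static const long X_LOCK_DECR = 0x20000000;

struct mini_rw_lock_t {
	std::atomic<long>	lock_word{X_LOCK_DECR};	/* X_LOCK_DECR == unlocked */
	std::thread::id		writer_thread{};	/* empty id == "no writer",
							i.e. writer_thread == 0 */
};

/* Try to x-lock without waiting; relocking by the owning thread succeeds. */
bool mini_x_lock_nowait(mini_rw_lock_t& lock)
{
	long	oldval = X_LOCK_DECR;

	if (lock.lock_word.compare_exchange_strong(oldval, 0)) {
		/* First acquisition. Only this thread will ever compare
		writer_thread against its own id, so a plain store is enough;
		this mirrors the patch's non-atomic update. */
		lock.writer_thread = std::this_thread::get_id();
	} else if (lock.writer_thread == std::this_thread::get_id()) {
		/* Relock. A real thread id never equals the empty id, so this
		single comparison is the "writer_thread != 0 means recursive"
		check. The owner is the only thread that can touch lock_word
		or writer_thread here. */
		lock.lock_word.fetch_sub(X_LOCK_DECR);
	} else {
		return false;
	}
	return true;
}

/* Release one x-lock level; writer_thread is reset before the final increment
of lock_word, mirroring rw_lock_x_unlock_func() in the diff below. */
void mini_x_unlock(mini_rw_lock_t& lock)
{
	if (lock.lock_word.load() == 0) {
		/* Last caller in a possible recursive chain. */
		lock.writer_thread = std::thread::id();
	}
	lock.lock_word.fetch_add(X_LOCK_DECR);
}

The design point is the one stated in the commit message: writer_thread is
written only by the thread that has just taken the lock, and each thread only
ever compares it against its own id, so a plain, non-atomic field still gives
the owner a correct answer; other threads merely read a value that is not
their id.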
......@@ -510,22 +510,6 @@ rw_lock_lock_word_decr(
rw_lock_t* lock, /*!< in/out: rw-lock */
ulint amount, /*!< in: amount to decrement */
lint threshold); /*!< in: threshold of judgement */
/******************************************************************//**
This function sets the lock->writer_thread and lock->recursive fields.
For platforms where we are using atomic builtins instead of lock->mutex
it sets the lock->writer_thread field using atomics to ensure memory
ordering. Note that it is assumed that the caller of this function
effectively owns the lock i.e.: nobody else is allowed to modify
lock->writer_thread at this point in time.
The protocol is that lock->writer_thread MUST be updated BEFORE the
lock->recursive flag is set. */
UNIV_INLINE
void
rw_lock_set_writer_id_and_recursion_flag(
/*=====================================*/
rw_lock_t* lock, /*!< in/out: lock to work on */
bool recursive); /*!< in: true if recursion
allowed */
#ifdef UNIV_DEBUG
/******************************************************************//**
Checks if the thread has locked the rw-lock in the specified mode, with
......@@ -612,17 +596,6 @@ struct rw_lock_t
/** 1: there are waiters */
volatile uint32_t waiters;
/** Default value FALSE which means the lock is non-recursive.
The value is typically set to TRUE making normal rw_locks recursive.
In case of asynchronous IO, when a non-zero value of 'pass' is
passed then we keep the lock non-recursive.
This flag also tells us about the state of writer_thread field.
If this flag is set then writer_thread MUST contain the thread
id of the current x-holder or wait-x thread. This flag must be
reset in x_unlock functions before incrementing the lock_word */
volatile bool recursive;
/** number of granted SX locks. */
volatile ulint sx_recursive;
......@@ -632,8 +605,12 @@ struct rw_lock_t
causing much memory bus traffic */
bool writer_is_wait_ex;
/** Thread id of writer thread. Is only guaranteed to have sane
and non-stale value iff recursive flag is set. */
/** The value is typically set to thread id of a writer thread making
normal rw_locks recursive. In case of asynchronous IO, when a non-zero
value of 'pass' is passed then we keep the lock non-recursive.
writer_thread must be reset in x_unlock functions before incrementing
the lock_word. */
volatile os_thread_id_t writer_thread;
/** Used by sync0arr.cc for thread queueing */
......
......@@ -223,28 +223,6 @@ rw_lock_lock_word_decr(
return(false);
}
/******************************************************************//**
This function sets the lock->writer_thread and lock->recursive fields.
For platforms where we are using atomic builtins instead of lock->mutex
it sets the lock->writer_thread field using atomics to ensure memory
ordering. Note that it is assumed that the caller of this function
effectively owns the lock i.e.: nobody else is allowed to modify
lock->writer_thread at this point in time.
The protocol is that lock->writer_thread MUST be updated BEFORE the
lock->recursive flag is set. */
UNIV_INLINE
void
rw_lock_set_writer_id_and_recursion_flag(
/*=====================================*/
rw_lock_t* lock, /*!< in/out: lock to work on */
bool recursive) /*!< in: true if recursion
allowed */
{
os_thread_id_t curr_thread = os_thread_get_curr_id();
my_atomic_storelong(&lock->writer_thread, (long) curr_thread);
lock->recursive = recursive;
}
/******************************************************************//**
Low-level function which tries to lock an rw-lock in s-mode. Performs no
spinning.
......@@ -334,19 +312,12 @@ rw_lock_x_lock_func_nowait(
const char* file_name,/*!< in: file name where lock requested */
ulint line) /*!< in: line where requested */
{
ibool local_recursive= lock->recursive;
lint oldval = X_LOCK_DECR;
/* Note: recursive must be loaded before writer_thread see
comment for rw_lock_set_writer_id_and_recursion_flag().
To achieve this we load it before my_atomic_caslint(),
which implies full memory barrier in current implementation. */
if (my_atomic_caslint(&lock->lock_word, &oldval, 0)) {
rw_lock_set_writer_id_and_recursion_flag(lock, true);
lock->writer_thread = os_thread_get_curr_id();
} else if (local_recursive
&& os_thread_eq(lock->writer_thread,
os_thread_get_curr_id())) {
} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
/* Relock: this lock_word modification is safe since no other
threads can modify (lock, unlock, or reserve) lock_word while
there is an exclusive writer and this is the writer thread. */
......@@ -435,15 +406,9 @@ rw_lock_x_unlock_func(
ut_ad(lock->lock_word == 0 || lock->lock_word == -X_LOCK_HALF_DECR
|| lock->lock_word <= -X_LOCK_DECR);
/* lock->recursive flag also indicates if lock->writer_thread is
valid or stale. If we are the last of the recursive callers
then we must unset lock->recursive flag to indicate that the
lock->writer_thread is now stale.
Note that since we still hold the x-lock we can safely read the
lock_word. */
if (lock->lock_word == 0) {
/* Last caller in a possible recursive chain. */
lock->recursive = FALSE;
lock->writer_thread = 0;
}
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_X));
......@@ -500,9 +465,7 @@ rw_lock_sx_unlock_func(
if (lock->sx_recursive == 0) {
/* Last caller in a possible recursive chain. */
if (lock->lock_word > 0) {
lock->recursive = FALSE;
UNIV_MEM_INVALID(&lock->writer_thread,
sizeof lock->writer_thread);
lock->writer_thread = 0;
if (my_atomic_addlint(&lock->lock_word, X_LOCK_HALF_DECR) <= 0) {
ut_error;
......
......@@ -104,31 +104,12 @@ sx-lock holder.
The other members of the lock obey the following rules to remain consistent:
recursive: This and the writer_thread field together control the
behaviour of recursive x-locking or sx-locking.
lock->recursive must be FALSE in following states:
1) The writer_thread contains garbage i.e.: the
lock has just been initialized.
2) The lock is not x-held and there is no
x-waiter waiting on WAIT_EX event.
3) The lock is x-held or there is an x-waiter
waiting on WAIT_EX event but the 'pass' value
is non-zero.
lock->recursive is TRUE iff:
1) The lock is x-held or there is an x-waiter
waiting on WAIT_EX event and the 'pass' value
is zero.
This flag must be set after the writer_thread field
has been updated with a memory ordering barrier.
It is unset before the lock_word has been incremented.
writer_thread: Is used only in recursive x-locking. Can only be safely
read iff lock->recursive flag is TRUE.
This field is uninitialized at lock creation time and
is updated atomically when x-lock is acquired or when
move_ownership is called. A thread is only allowed to
set the value of this field to it's thread_id i.e.: a
thread cannot set writer_thread to some other thread's
id.
writer_thread: Is used only in recursive x-locking or sx-locking.
This field is 0 at lock creation time and is updated
when x-lock is acquired or when move_ownership is called.
A thread is only allowed to set the value of this field to
it's thread_id i.e.: a thread cannot set writer_thread to
some other thread's id.
waiters: May be set to 1 anytime, but to avoid unnecessary wake-up
signals, it should only be set to 1 when there are threads
waiting on event. Must be 1 when a writer starts waiting to
......@@ -236,14 +217,8 @@ rw_lock_create_func(
lock->lock_word = X_LOCK_DECR;
lock->waiters = 0;
/* We set this value to signify that lock->writer_thread
contains garbage at initialization and cannot be used for
recursive x-locking. */
lock->recursive = FALSE;
lock->sx_recursive = 0;
/* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. */
memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread);
UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread);
lock->writer_thread= 0;
#ifdef UNIV_DEBUG
lock->m_rw_lock = true;
......@@ -437,7 +412,7 @@ rw_lock_x_lock_move_ownership(
{
ut_ad(rw_lock_is_locked(lock, RW_LOCK_X));
rw_lock_set_writer_id_and_recursion_flag(lock, true);
lock->writer_thread = os_thread_get_curr_id();
}
/******************************************************************//**
......@@ -557,20 +532,17 @@ rw_lock_x_lock_low(
const char* file_name,/*!< in: file name where lock requested */
ulint line) /*!< in: line where requested */
{
ibool local_recursive= lock->recursive;
if (rw_lock_lock_word_decr(lock, X_LOCK_DECR, X_LOCK_HALF_DECR)) {
/* lock->recursive also tells us if the writer_thread
field is stale or active. As we are going to write
our own thread id in that field it must be that the
current writer_thread value is not active. */
ut_a(!lock->recursive);
/* As we are going to write our own thread id in that field it
must be that the current writer_thread value is not active. */
ut_a(!lock->writer_thread);
/* Decrement occurred: we are writer or next-writer. */
rw_lock_set_writer_id_and_recursion_flag(
lock, !pass);
if (!pass)
{
lock->writer_thread = os_thread_get_curr_id();
}
rw_lock_x_lock_wait(lock, pass, 0, file_name, line);
......@@ -578,13 +550,8 @@ rw_lock_x_lock_low(
os_thread_id_t thread_id = os_thread_get_curr_id();
/* Decrement failed: An X or SX lock is held by either
this thread or another. Try to relock.
Note: recursive must be loaded before writer_thread see
comment for rw_lock_set_writer_id_and_recursion_flag().
To achieve this we load it before rw_lock_lock_word_decr(),
which implies full memory barrier in current implementation. */
if (!pass && local_recursive
&& os_thread_eq(lock->writer_thread, thread_id)) {
this thread or another. Try to relock. */
if (!pass && os_thread_eq(lock->writer_thread, thread_id)) {
/* Other s-locks can be allowed. If it is request x
recursively while holding sx lock, this x lock should
be along with the latching-order. */
......@@ -645,19 +612,17 @@ rw_lock_sx_lock_low(
const char* file_name,/*!< in: file name where lock requested */
ulint line) /*!< in: line where requested */
{
ibool local_recursive= lock->recursive;
if (rw_lock_lock_word_decr(lock, X_LOCK_HALF_DECR, X_LOCK_HALF_DECR)) {
/* lock->recursive also tells us if the writer_thread
field is stale or active. As we are going to write
our own thread id in that field it must be that the
current writer_thread value is not active. */
ut_a(!lock->recursive);
/* As we are going to write our own thread id in that field it
must be that the current writer_thread value is not active. */
ut_a(!lock->writer_thread);
/* Decrement occurred: we are the SX lock owner. */
rw_lock_set_writer_id_and_recursion_flag(
lock, !pass);
if (!pass)
{
lock->writer_thread = os_thread_get_curr_id();
}
lock->sx_recursive = 1;
} else {
......@@ -665,13 +630,8 @@ rw_lock_sx_lock_low(
/* Decrement failed: It already has an X or SX lock by this
thread or another thread. If it is this thread, relock,
else fail.
Note: recursive must be loaded before writer_thread see
comment for rw_lock_set_writer_id_and_recursion_flag().
To achieve this we load it before rw_lock_lock_word_decr(),
which implies full memory barrier in current implementation. */
if (!pass && local_recursive
&& os_thread_eq(lock->writer_thread, thread_id)) {
else fail. */
if (!pass && os_thread_eq(lock->writer_thread, thread_id)) {
/* This thread owns an X or SX lock */
if (lock->sx_recursive++ == 0) {
/* This thread is making first SX-lock request
......
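For completeness, a hypothetical usage of the sketch above (before the diff),
showing that the owning thread may relock and must unlock once per lock level:

#include <cassert>

int main()
{
	mini_rw_lock_t	lock;

	assert(mini_x_lock_nowait(lock));	/* first acquisition */
	assert(mini_x_lock_nowait(lock));	/* recursive relock by the owner */

	mini_x_unlock(lock);			/* lock_word back to 0 */
	mini_x_unlock(lock);			/* writer_thread reset, lock free */

	return 0;
}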