Commit 6b5c4a9e authored by Dmitry Lenev

Fix for bug #51105 "MDL deadlock in rqg_mdl_stability test on Windows".

On platforms where the read-write lock implementation does not
prefer readers by default (Windows, Solaris), the server might
have deadlocked while detecting an MDL deadlock.

The MDL deadlock detector relies on the fact that the read-write
locks used in its implementation prefer readers (see the new
comment for MDL_lock::m_rwlock for details). So far the MDL code
assumed that the system's default implementation of read/write
locks has this property. This turned out to be wrong, for
example, on Windows and Solaris, so the MDL deadlock detector
might have deadlocked on these systems.
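For illustration only (this snippet is not part of the patch): a
minimal POSIX probe of the behaviour the detector depends on. A
thread holding a read lock tries to take a second read lock while
a writer is queued. On reader-preferring implementations the
try-lock succeeds; on writer-preferring or fair ones it returns
EBUSY, which is exactly the situation that hangs the detector.

  #include <errno.h>
  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  static pthread_rwlock_t rwlock= PTHREAD_RWLOCK_INITIALIZER;

  static void *writer(void *arg __attribute__((unused)))
  {
    pthread_rwlock_wrlock(&rwlock);   /* queues behind the reader */
    pthread_rwlock_unlock(&rwlock);
    return NULL;
  }

  int main(void)
  {
    pthread_t thd;

    pthread_rwlock_rdlock(&rwlock);            /* "ctxA read-locks obj1" */
    pthread_create(&thd, NULL, writer, NULL);  /* "ctxC wants obj1" */
    sleep(1);                                  /* let the writer block */

    if (pthread_rwlock_tryrdlock(&rwlock) == EBUSY)
      puts("pending writer blocks new readers: detector would hang");
    else
    {
      puts("readers preferred: detector can proceed");
      pthread_rwlock_unlock(&rwlock);
    }

    pthread_rwlock_unlock(&rwlock);  /* queued writer now proceeds */
    pthread_join(thd, NULL);
    return 0;
  }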

This fix simply adds a portable implementation of read/write
locks which prefer readers and changes the MDL code to use this
new type of synchronization primitive.
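A sketch of the resulting call pattern, as used in sql/mdl.cc
below (it assumes the server's my_pthread.h; the names graph_lock
and sketch() are illustrative only):

  #include <my_pthread.h>          /* rw_pr_* primitives from this patch */

  static rw_pr_lock_t graph_lock;  /* cf. MDL_context::m_waiting_for_lock */

  void sketch(void)
  {
    rw_pr_init(&graph_lock);

    /* Deadlock detection traverses the waiters graph under read
       locks, so concurrent traversals do not block each other. */
    rw_pr_rdlock(&graph_lock);
    /* ... walk m_waiting_for edges ... */
    rw_pr_unlock(&graph_lock);

    /* Changing what a context waits for takes the write lock. */
    rw_pr_wrlock(&graph_lock);
    /* ... update m_waiting_for ... */
    rw_pr_unlock(&graph_lock);

    rw_pr_destroy(&graph_lock);
  }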

No test case is added, as the existing rqg_mdl_stability test
can serve as one.

config.h.cmake:
  Check for the presence of pthread_rwlockattr_setkind_np to
  determine whether the system natively supports read-write
  locks for which we can specify whether readers or writers
  should be preferred.
configure.cmake:
  Check for the presence of pthread_rwlockattr_setkind_np to
  determine whether the system natively supports read-write
  locks for which we can specify whether readers or writers
  should be preferred.
configure.in:
  Check for the presence of pthread_rwlockattr_setkind_np to
  determine whether the system natively supports read-write
  locks for which we can specify whether readers or writers
  should be preferred.
include/my_pthread.h:
  Added support for portable read-write locks which prefer
  readers.
  To do so, extended the existing my_rw_lock_t implementation
  to support selecting whom to prefer via a flag.
mysys/thr_rwlock.c:
  Extended the existing my_rw_lock_t implementation to support
  selecting whom to prefer via a flag.
  Added the rw_pr_init() function, which initializes read-write
  locks that prefer readers.
sql/mdl.cc:
  Use portable read-write locks which prefer readers instead of
  relying on the system implementation of read-write locks
  having this property (true for Linux/NPTL but false, for
  example, on Windows and Solaris).
  Added a comment explaining why preferring readers is important
  for the MDL deadlock detector (thanks to Serg for the example!).
sql/mdl.h:
  Use portable read-write locks which prefer readers instead of
  relying on the system implementation of read-write locks
  having this property (true for Linux/NPTL but false, for
  example, on Windows and Solaris).
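For reference, on systems with pthread_rwlockattr_setkind_np
(e.g. glibc) the reader preference is requested through a rwlock
attribute, as in the sketch below (init_reader_preferring is an
illustrative name, not part of the patch; _GNU_SOURCE is needed
for the *_np calls). Note that the attribute object must actually
be passed to pthread_rwlock_init for the kind to take effect.

  #define _GNU_SOURCE
  #include <pthread.h>

  int init_reader_preferring(pthread_rwlock_t *rwlock)
  {
    pthread_rwlockattr_t attr;

    pthread_rwlockattr_init(&attr);
    pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_READER_NP);
    /* Passing NULL here would silently keep the default policy. */
    pthread_rwlock_init(rwlock, &attr);
    pthread_rwlockattr_destroy(&attr);
    return 0;
  }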
parent 093106f5
config.h.cmake
......@@ -205,6 +205,7 @@
#cmakedefine HAVE_PTHREAD_KEY_DELETE 1
#cmakedefine HAVE_PTHREAD_KILL 1
#cmakedefine HAVE_PTHREAD_RWLOCK_RDLOCK 1
#cmakedefine HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP 1
#cmakedefine HAVE_PTHREAD_SETPRIO_NP 1
#cmakedefine HAVE_PTHREAD_SETSCHEDPARAM 1
#cmakedefine HAVE_PTHREAD_SIGMASK 1
......
configure.cmake
......@@ -308,6 +308,7 @@ CHECK_FUNCTION_EXISTS (pthread_condattr_setclock HAVE_PTHREAD_CONDATTR_SETCLOCK)
CHECK_FUNCTION_EXISTS (pthread_init HAVE_PTHREAD_INIT)
CHECK_FUNCTION_EXISTS (pthread_key_delete HAVE_PTHREAD_KEY_DELETE)
CHECK_FUNCTION_EXISTS (pthread_rwlock_rdlock HAVE_PTHREAD_RWLOCK_RDLOCK)
CHECK_FUNCTION_EXISTS (pthread_rwlockattr_setkind_np HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
CHECK_FUNCTION_EXISTS (pthread_sigmask HAVE_PTHREAD_SIGMASK)
CHECK_FUNCTION_EXISTS (pthread_threadmask HAVE_PTHREAD_THREADMASK)
CHECK_FUNCTION_EXISTS (pthread_yield_np HAVE_PTHREAD_YIELD_NP)
......
configure.in
......@@ -2266,7 +2266,8 @@ AC_CHECK_FUNCS(alarm bcmp bfill bmove bsearch bzero \
locking longjmp lrand48 madvise mallinfo memcpy memmove \
mkstemp mlockall perror poll pread pthread_attr_create mmap mmap64 getpagesize \
pthread_attr_getstacksize pthread_attr_setstacksize pthread_condattr_create \
pthread_getsequence_np pthread_key_delete pthread_rwlock_rdlock pthread_sigmask \
pthread_getsequence_np pthread_key_delete pthread_rwlock_rdlock \
pthread_rwlockattr_setkind_np pthread_sigmask \
readlink realpath rename rint rwlock_init setupterm \
shmget shmat shmdt shmctl sigaction sigemptyset sigaddset \
sighold sigset sigthreadmask port_create sleep thr_yield \
......
include/my_pthread.h
......@@ -600,30 +600,76 @@ int my_pthread_fastmutex_lock(my_pthread_fastmutex_t *mp);
#define my_rwlock_init(A,B) rwlock_init((A),USYNC_THREAD,0)
#else
/* Use our own version of read/write locks */
typedef struct _my_rw_lock_t {
pthread_mutex_t lock; /* lock for structure */
pthread_cond_t readers; /* waiting readers */
pthread_cond_t writers; /* waiting writers */
int state; /* -1:writer,0:free,>0:readers */
int waiters; /* number of waiting writers */
} my_rw_lock_t;
#define NEED_MY_RW_LOCK 1
#define rw_lock_t my_rw_lock_t
#define my_rwlock_init(A,B) my_rw_init((A), 0)
#define rw_rdlock(A) my_rw_rdlock((A))
#define rw_wrlock(A) my_rw_wrlock((A))
#define rw_tryrdlock(A) my_rw_tryrdlock((A))
#define rw_trywrlock(A) my_rw_trywrlock((A))
#define rw_unlock(A) my_rw_unlock((A))
#define rwlock_destroy(A) my_rwlock_destroy((A))
#define rwlock_destroy(A) my_rw_destroy((A))
#endif /* USE_MUTEX_INSTEAD_OF_RW_LOCKS */
extern int my_rwlock_init(my_rw_lock_t *, void *);
extern int my_rwlock_destroy(my_rw_lock_t *);
/*
Portable read-write locks which prefer readers.
Required by some algorithms in order to provide correctness.
*/
#if defined(HAVE_PTHREAD_RWLOCK_RDLOCK) && defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
/*
On systems which have a way to specify that readers should be
preferred through an attribute mechanism (e.g. Linux), we use
the system implementation of read/write locks.
*/
#define rw_pr_lock_t pthread_rwlock_t
extern int rw_pr_init(rw_pr_lock_t *);
#define rw_pr_rdlock(A) pthread_rwlock_rdlock(A)
#define rw_pr_wrlock(A) pthread_rwlock_wrlock(A)
#define rw_pr_tryrdlock(A) pthread_rwlock_tryrdlock(A)
#define rw_pr_trywrlock(A) pthread_rwlock_trywrlock(A)
#define rw_pr_unlock(A) pthread_rwlock_unlock(A)
#define rw_pr_destroy(A) pthread_rwlock_destroy(A)
#else
/* Otherwise we have to use our own implementation of read/write locks. */
#define NEED_MY_RW_LOCK 1
struct st_my_rw_lock_t;
#define rw_pr_lock_t my_rw_lock_t
extern int rw_pr_init(struct st_my_rw_lock_t *);
#define rw_pr_rdlock(A) my_rw_rdlock((A))
#define rw_pr_wrlock(A) my_rw_wrlock((A))
#define rw_pr_tryrdlock(A) my_rw_tryrdlock((A))
#define rw_pr_trywrlock(A) my_rw_trywrlock((A))
#define rw_pr_unlock(A) my_rw_unlock((A))
#define rw_pr_destroy(A) my_rw_destroy((A))
#endif /* defined(HAVE_PTHREAD_RWLOCK_RDLOCK) && defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) */
#ifdef NEED_MY_RW_LOCK
/*
On systems which don't support native read/write locks, or don't
support read/write locks which prefer readers, we have to use our
own implementation.
*/
typedef struct st_my_rw_lock_t {
pthread_mutex_t lock; /* lock for structure */
pthread_cond_t readers; /* waiting readers */
pthread_cond_t writers; /* waiting writers */
int state; /* -1:writer,0:free,>0:readers */
int waiters; /* number of waiting writers */
my_bool prefer_readers;
} my_rw_lock_t;
extern int my_rw_init(my_rw_lock_t *, my_bool *);
extern int my_rw_destroy(my_rw_lock_t *);
extern int my_rw_rdlock(my_rw_lock_t *);
extern int my_rw_wrlock(my_rw_lock_t *);
extern int my_rw_unlock(my_rw_lock_t *);
extern int my_rw_tryrdlock(my_rw_lock_t *);
extern int my_rw_trywrlock(my_rw_lock_t *);
#endif /* USE_MUTEX_INSTEAD_OF_RW_LOCKS */
#endif /* NEED_MY_RW_LOCK */
#define GETHOSTBYADDR_BUFF_SIZE 2048
......
mysys/thr_rwlock.c
......@@ -16,7 +16,8 @@
/* Synchronization - readers / writer thread locks */
#include "mysys_priv.h"
#if defined(THREAD) && !defined(HAVE_PTHREAD_RWLOCK_RDLOCK) && !defined(HAVE_RWLOCK_INIT)
#if defined(THREAD)
#if defined(NEED_MY_RW_LOCK)
#include <errno.h>
/*
......@@ -58,7 +59,7 @@
* Mountain View, California 94043
*/
int my_rwlock_init(rw_lock_t *rwp, void *arg __attribute__((unused)))
int my_rw_init(my_rw_lock_t *rwp, my_bool *prefer_readers_attr)
{
pthread_condattr_t cond_attr;
......@@ -70,12 +71,14 @@ int my_rwlock_init(rw_lock_t *rwp, void *arg __attribute__((unused)))
rwp->state = 0;
rwp->waiters = 0;
/* If attribute argument is NULL use default value - prefer writers. */
rwp->prefer_readers= prefer_readers_attr ? *prefer_readers_attr : FALSE;
return(0);
}
int my_rwlock_destroy(rw_lock_t *rwp)
int my_rw_destroy(my_rw_lock_t *rwp)
{
pthread_mutex_destroy( &rwp->lock );
pthread_cond_destroy( &rwp->readers );
......@@ -84,12 +87,13 @@ int my_rwlock_destroy(rw_lock_t *rwp)
}
int my_rw_rdlock(rw_lock_t *rwp)
int my_rw_rdlock(my_rw_lock_t *rwp)
{
pthread_mutex_lock(&rwp->lock);
/* active or queued writers */
while (( rwp->state < 0 ) || rwp->waiters)
while (( rwp->state < 0 ) ||
(rwp->waiters && ! rwp->prefer_readers))
pthread_cond_wait( &rwp->readers, &rwp->lock);
rwp->state++;
......@@ -97,11 +101,12 @@ int my_rw_rdlock(rw_lock_t *rwp)
return(0);
}
int my_rw_tryrdlock(rw_lock_t *rwp)
int my_rw_tryrdlock(my_rw_lock_t *rwp)
{
int res;
pthread_mutex_lock(&rwp->lock);
if ((rwp->state < 0 ) || rwp->waiters)
if ((rwp->state < 0 ) ||
(rwp->waiters && ! rwp->prefer_readers))
res= EBUSY; /* Can't get lock */
else
{
......@@ -113,7 +118,7 @@ int my_rw_tryrdlock(rw_lock_t *rwp)
}
int my_rw_wrlock(rw_lock_t *rwp)
int my_rw_wrlock(my_rw_lock_t *rwp)
{
pthread_mutex_lock(&rwp->lock);
rwp->waiters++; /* another writer queued */
......@@ -127,7 +132,7 @@ int my_rw_wrlock(rw_lock_t *rwp)
}
int my_rw_trywrlock(rw_lock_t *rwp)
int my_rw_trywrlock(my_rw_lock_t *rwp)
{
int res;
pthread_mutex_lock(&rwp->lock);
......@@ -143,7 +148,7 @@ int my_rw_trywrlock(rw_lock_t *rwp)
}
int my_rw_unlock(rw_lock_t *rwp)
int my_rw_unlock(my_rw_lock_t *rwp)
{
DBUG_PRINT("rw_unlock",
("state: %d waiters: %d", rwp->state, rwp->waiters));
......@@ -160,7 +165,8 @@ int my_rw_unlock(rw_lock_t *rwp)
}
else
{
if ( --rwp->state == 0 ) /* no more readers */
if ( --rwp->state == 0 && /* no more readers */
rwp->waiters)
pthread_cond_signal( &rwp->writers );
}
......@@ -168,4 +174,30 @@ int my_rw_unlock(rw_lock_t *rwp)
return(0);
}
#endif
int rw_pr_init(struct st_my_rw_lock_t *rwlock)
{
my_bool prefer_readers_attr= TRUE;
return my_rw_init(rwlock, &prefer_readers_attr);
}
#else
/*
We are on a system which has native read/write locks that
support preferring readers.
*/
int rw_pr_init(rw_pr_lock_t *rwlock)
{
pthread_rwlockattr_t rwlock_attr;
pthread_rwlockattr_init(&rwlock_attr);
pthread_rwlockattr_setkind_np(&rwlock_attr, PTHREAD_RWLOCK_PREFER_READER_NP);
pthread_rwlock_init(rwlock, &rwlock_attr);
pthread_rwlockattr_destroy(&rwlock_attr);
return 0;
}
#endif /* defined(NEED_MY_RW_LOCK) */
#endif /* defined(THREAD) */
sql/mdl.cc
......@@ -148,10 +148,37 @@
/**
Read-write lock protecting this lock context.
TODO/FIXME: Replace with RW-lock which will prefer readers
on all platforms and not only on Linux.
@note The fact that we use a read-write lock that prefers readers
here is important, as the deadlock detector won't work correctly
otherwise.

For example, imagine that we have the following waiters graph:

             ctxA -> obj1 -> ctxB -> obj2 -|
              ^                            |
              |----------------------------|

and both ctxA and ctxB start the deadlock detection process:

ctxA read-locks obj1              ctxB read-locks obj2
ctxA goes deeper                  ctxB goes deeper

Now ctxC comes in and wants to start waiting on obj1, and ctxD
comes in and wants to start waiting on obj2:

ctxC tries to write-lock obj1     ctxD tries to write-lock obj2
ctxC is blocked                   ctxD is blocked

Now ctxA and ctxB resume their search:

ctxA tries to read-lock obj2      ctxB tries to read-lock obj1

If m_rwlock prefers writers (or is fair), both ctxA and ctxB are
blocked by the pending write locks from ctxD and ctxC
respectively. Thus we get a deadlock in the deadlock detector
itself. If m_rwlock prefers readers (actually, ignoring pending
writers is enough), ctxA and ctxB will continue and no deadlock
will occur.
*/
rw_lock_t m_rwlock;
rw_pr_lock_t m_rwlock;
bool is_empty() const
{
......@@ -213,12 +240,12 @@ public:
m_ref_release(0),
m_is_destroyed(FALSE)
{
my_rwlock_init(&m_rwlock, NULL);
rw_pr_init(&m_rwlock);
}
virtual ~MDL_lock()
{
rwlock_destroy(&m_rwlock);
rw_pr_destroy(&m_rwlock);
}
inline static void destroy(MDL_lock *lock);
public:
......@@ -480,7 +507,7 @@ bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock)
lock->m_ref_usage++;
mysql_mutex_unlock(&m_mutex);
rw_wrlock(&lock->m_rwlock);
rw_pr_wrlock(&lock->m_rwlock);
lock->m_ref_release++;
if (unlikely(lock->m_is_destroyed))
{
......@@ -495,7 +522,7 @@ bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock)
*/
uint ref_usage= lock->m_ref_usage;
uint ref_release= lock->m_ref_release;
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
if (ref_usage == ref_release)
MDL_lock::destroy(lock);
return TRUE;
......@@ -538,7 +565,7 @@ void MDL_map::remove(MDL_lock *lock)
lock->m_is_destroyed= TRUE;
ref_usage= lock->m_ref_usage;
ref_release= lock->m_ref_release;
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
mysql_mutex_unlock(&m_mutex);
if (ref_usage == ref_release)
MDL_lock::destroy(lock);
......@@ -559,7 +586,7 @@ MDL_context::MDL_context()
m_deadlock_weight(0),
m_signal(NO_WAKE_UP)
{
my_rwlock_init(&m_waiting_for_lock, NULL);
rw_pr_init(&m_waiting_for_lock);
mysql_mutex_init(NULL /* pfs key */, &m_signal_lock, NULL);
mysql_cond_init(NULL /* pfs key */, &m_signal_cond, NULL);
}
......@@ -581,7 +608,7 @@ void MDL_context::destroy()
{
DBUG_ASSERT(m_tickets.is_empty());
rwlock_destroy(&m_waiting_for_lock);
rw_pr_destroy(&m_waiting_for_lock);
mysql_mutex_destroy(&m_signal_lock);
mysql_cond_destroy(&m_signal_cond);
}
......@@ -1071,7 +1098,7 @@ MDL_lock::can_grant_lock(enum_mdl_type type_arg,
void MDL_lock::remove_ticket(Ticket_list MDL_lock::*list, MDL_ticket *ticket)
{
rw_wrlock(&m_rwlock);
rw_pr_wrlock(&m_rwlock);
(this->*list).remove_ticket(ticket);
if (is_empty())
mdl_locks.remove(this);
......@@ -1082,7 +1109,7 @@ void MDL_lock::remove_ticket(Ticket_list MDL_lock::*list, MDL_ticket *ticket)
which now might be able to do it. Wake them up!
*/
wake_up_waiters();
rw_unlock(&m_rwlock);
rw_pr_unlock(&m_rwlock);
}
}
......@@ -1102,9 +1129,9 @@ bool MDL_lock::has_pending_conflicting_lock(enum_mdl_type type)
mysql_mutex_assert_not_owner(&LOCK_open);
rw_rdlock(&m_rwlock);
rw_pr_rdlock(&m_rwlock);
result= (m_waiting.bitmap() & incompatible_granted_types_bitmap()[type]);
rw_unlock(&m_rwlock);
rw_pr_unlock(&m_rwlock);
return result;
}
......@@ -1298,7 +1325,7 @@ MDL_context::try_acquire_lock(MDL_request *mdl_request)
{
ticket->m_lock= lock;
lock->m_granted.add_ticket(ticket);
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
m_tickets.push_front(ticket);
......@@ -1308,7 +1335,7 @@ MDL_context::try_acquire_lock(MDL_request *mdl_request)
{
/* We can't get here if we allocated a new lock. */
DBUG_ASSERT(! lock->is_empty());
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
MDL_ticket::destroy(ticket);
}
......@@ -1349,9 +1376,9 @@ MDL_context::clone_ticket(MDL_request *mdl_request)
ticket->m_lock= mdl_request->ticket->m_lock;
mdl_request->ticket= ticket;
rw_wrlock(&ticket->m_lock->m_rwlock);
rw_pr_wrlock(&ticket->m_lock->m_rwlock);
ticket->m_lock->m_granted.add_ticket(ticket);
rw_unlock(&ticket->m_lock->m_rwlock);
rw_pr_unlock(&ticket->m_lock->m_rwlock);
m_tickets.push_front(ticket);
......@@ -1457,7 +1484,7 @@ bool MDL_context::acquire_lock_impl(MDL_request *mdl_request,
if (ticket->is_upgradable_or_exclusive())
lock->notify_shared_locks(this);
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
set_deadlock_weight(mdl_request->get_deadlock_weight());
will_wait_for(ticket);
......@@ -1492,7 +1519,7 @@ bool MDL_context::acquire_lock_impl(MDL_request *mdl_request,
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
return TRUE;
}
rw_wrlock(&lock->m_rwlock);
rw_pr_wrlock(&lock->m_rwlock);
}
lock->m_waiting.remove_ticket(ticket);
......@@ -1502,7 +1529,7 @@ bool MDL_context::acquire_lock_impl(MDL_request *mdl_request,
(*lock->cached_object_release_hook)(lock->cached_object);
lock->cached_object= NULL;
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
m_tickets.push_front(ticket);
......@@ -1647,7 +1674,7 @@ MDL_context::upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket,
is_new_ticket= ! has_lock(mdl_svp, mdl_xlock_request.ticket);
/* Merge the acquired and the original lock. @todo: move to a method. */
rw_wrlock(&mdl_ticket->m_lock->m_rwlock);
rw_pr_wrlock(&mdl_ticket->m_lock->m_rwlock);
if (is_new_ticket)
mdl_ticket->m_lock->m_granted.remove_ticket(mdl_xlock_request.ticket);
/*
......@@ -1659,7 +1686,7 @@ MDL_context::upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket,
mdl_ticket->m_type= MDL_EXCLUSIVE;
mdl_ticket->m_lock->m_granted.add_ticket(mdl_ticket);
rw_unlock(&mdl_ticket->m_lock->m_rwlock);
rw_pr_unlock(&mdl_ticket->m_lock->m_rwlock);
if (is_new_ticket)
{
......@@ -1677,7 +1704,7 @@ bool MDL_lock::find_deadlock(MDL_ticket *waiting_ticket,
MDL_ticket *ticket;
bool result= FALSE;
rw_rdlock(&m_rwlock);
rw_pr_rdlock(&m_rwlock);
Ticket_iterator granted_it(m_granted);
Ticket_iterator waiting_it(m_waiting);
......@@ -1729,7 +1756,7 @@ bool MDL_lock::find_deadlock(MDL_ticket *waiting_ticket,
}
end:
rw_unlock(&m_rwlock);
rw_pr_unlock(&m_rwlock);
return result;
}
......@@ -1738,7 +1765,7 @@ bool MDL_context::find_deadlock(Deadlock_detection_context *deadlock_ctx)
{
bool result= FALSE;
rw_rdlock(&m_waiting_for_lock);
rw_pr_rdlock(&m_waiting_for_lock);
if (m_waiting_for)
{
......@@ -1767,14 +1794,14 @@ bool MDL_context::find_deadlock(Deadlock_detection_context *deadlock_ctx)
deadlock_ctx->victim= this;
else if (deadlock_ctx->victim->m_deadlock_weight >= m_deadlock_weight)
{
rw_unlock(&deadlock_ctx->victim->m_waiting_for_lock);
rw_pr_unlock(&deadlock_ctx->victim->m_waiting_for_lock);
deadlock_ctx->victim= this;
}
else
rw_unlock(&m_waiting_for_lock);
rw_pr_unlock(&m_waiting_for_lock);
}
else
rw_unlock(&m_waiting_for_lock);
rw_pr_unlock(&m_waiting_for_lock);
return result;
}
......@@ -1800,7 +1827,7 @@ bool MDL_context::find_deadlock()
if (deadlock_ctx.victim != this)
{
deadlock_ctx.victim->awake(VICTIM_WAKE_UP);
rw_unlock(&deadlock_ctx.victim->m_waiting_for_lock);
rw_pr_unlock(&deadlock_ctx.victim->m_waiting_for_lock);
/*
After adding new arc to waiting graph we found that it participates
in some loop (i.e. there is a deadlock). We decided to destroy this
......@@ -1813,7 +1840,7 @@ bool MDL_context::find_deadlock()
else
{
DBUG_ASSERT(&deadlock_ctx.victim->m_waiting_for_lock == &m_waiting_for_lock);
rw_unlock(&deadlock_ctx.victim->m_waiting_for_lock);
rw_pr_unlock(&deadlock_ctx.victim->m_waiting_for_lock);
return TRUE;
}
}
......@@ -1870,14 +1897,14 @@ MDL_context::wait_for_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
if (lock->can_grant_lock(mdl_request->type, this))
{
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
return FALSE;
}
MDL_ticket *pending_ticket;
if (! (pending_ticket= MDL_ticket::create(this, mdl_request->type)))
{
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
return TRUE;
}
......@@ -1886,7 +1913,7 @@ MDL_context::wait_for_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
lock->m_waiting.add_ticket(pending_ticket);
wait_reset();
rw_unlock(&lock->m_rwlock);
rw_pr_unlock(&lock->m_rwlock);
set_deadlock_weight(MDL_DEADLOCK_WEIGHT_DML);
will_wait_for(pending_ticket);
......@@ -2037,7 +2064,7 @@ void MDL_ticket::downgrade_exclusive_lock(enum_mdl_type type)
if (m_type != MDL_EXCLUSIVE)
return;
rw_wrlock(&m_lock->m_rwlock);
rw_pr_wrlock(&m_lock->m_rwlock);
/*
To update state of MDL_lock object correctly we need to temporarily
exclude ticket from the granted queue and then include it back.
......@@ -2046,7 +2073,7 @@ void MDL_ticket::downgrade_exclusive_lock(enum_mdl_type type)
m_type= type;
m_lock->m_granted.add_ticket(this);
m_lock->wake_up_waiters();
rw_unlock(&m_lock->m_rwlock);
rw_pr_unlock(&m_lock->m_rwlock);
}
......
sql/mdl.h
......@@ -624,10 +624,11 @@ private:
/**
Read-write lock protecting m_waiting_for member.
TODO/FIXME: Replace with RW-lock which will prefer readers
on all platforms and not only on Linux.
@note The fact that this read-write lock prefers readers is
important, as the deadlock detector won't work correctly
otherwise. @sa Comment for MDL_lock::m_rwlock.
*/
rw_lock_t m_waiting_for_lock;
rw_pr_lock_t m_waiting_for_lock;
MDL_ticket *m_waiting_for;
uint m_deadlock_weight;
/**
......@@ -651,9 +652,9 @@ private:
void will_wait_for(MDL_ticket *pending_ticket)
{
rw_wrlock(&m_waiting_for_lock);
rw_pr_wrlock(&m_waiting_for_lock);
m_waiting_for= pending_ticket;
rw_unlock(&m_waiting_for_lock);
rw_pr_unlock(&m_waiting_for_lock);
}
void set_deadlock_weight(uint weight)
......@@ -669,9 +670,9 @@ private:
void stop_waiting()
{
rw_wrlock(&m_waiting_for_lock);
rw_pr_wrlock(&m_waiting_for_lock);
m_waiting_for= NULL;
rw_unlock(&m_waiting_for_lock);
rw_pr_unlock(&m_waiting_for_lock);
}
void wait_reset()
......