Commit 9321f815 authored by Thomas Gleixner, committed by Peter Zijlstra

rtmutex: Wake up the waiters lockless while dropping the read lock.

The rw_semaphore and rwlock_t implementations both wake the waiter while
holding rt_mutex_base::wait_lock.
This can be optimized by waking the waiter lockless outside of the
locked section to avoid a needless contention on the
rt_mutex_base::wait_lock lock.

Extend rt_mutex_wake_q_add() to also accept task and state and use it in
__rwbase_read_unlock().
Suggested-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210928150006.597310-3-bigeasy@linutronix.de
parent 8fe46535
...@@ -446,19 +446,26 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p) ...@@ -446,19 +446,26 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
} }
/* RT mutex specific wake_q wrappers */ /* RT mutex specific wake_q wrappers */
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh, static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
struct rt_mutex_waiter *w) struct task_struct *task,
unsigned int wake_state)
{ {
if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state == TASK_RTLOCK_WAIT) { if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(wqh->rtlock_task); WARN_ON_ONCE(wqh->rtlock_task);
get_task_struct(w->task); get_task_struct(task);
wqh->rtlock_task = w->task; wqh->rtlock_task = task;
} else { } else {
wake_q_add(&wqh->head, w->task); wake_q_add(&wqh->head, task);
} }
} }
/*
 * Queue the waiter's task for wakeup, forwarding the waiter's saved
 * wake state to rt_mutex_wake_q_add_task().
 */
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
						struct rt_mutex_waiter *w)
{
	rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
}
static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh) static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{ {
if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
......
...@@ -141,6 +141,7 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb, ...@@ -141,6 +141,7 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
{ {
struct rt_mutex_base *rtm = &rwb->rtmutex; struct rt_mutex_base *rtm = &rwb->rtmutex;
struct task_struct *owner; struct task_struct *owner;
DEFINE_RT_WAKE_Q(wqh);
raw_spin_lock_irq(&rtm->wait_lock); raw_spin_lock_irq(&rtm->wait_lock);
/* /*
...@@ -151,9 +152,12 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb, ...@@ -151,9 +152,12 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
*/ */
owner = rt_mutex_owner(rtm); owner = rt_mutex_owner(rtm);
if (owner) if (owner)
wake_up_state(owner, state); rt_mutex_wake_q_add_task(&wqh, owner, state);
/* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
preempt_disable();
raw_spin_unlock_irq(&rtm->wait_lock); raw_spin_unlock_irq(&rtm->wait_lock);
rt_mutex_wake_up_q(&wqh);
} }
static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb, static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment