Commit ebbdc41e authored by Thomas Gleixner, committed by Ingo Molnar

locking/rtmutex: Provide rt_mutex_slowlock_locked()

Split the inner workings of rt_mutex_slowlock() out into a separate
function, which can be reused by the upcoming RT lock substitutions,
e.g. for rw_semaphores.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.841971086@linutronix.de
parent 830e6acc
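
For a sense of the intended reuse, here is a minimal sketch (not part of this patch): a hypothetical write-side slowpath for an rtmutex-based rw_semaphore substitution that manages lock->wait_lock itself and therefore calls the split-out, wait_lock-held variant instead of the full rt_mutex_slowlock(). The name rwsem_rt_write_slowlock() is invented for illustration; the actual rw_semaphore substitution arrives in later patches.

/*
 * Illustrative sketch only -- rwsem_rt_write_slowlock() is a made-up name.
 * A caller that takes lock->wait_lock itself (e.g. to inspect its own
 * reader state under the same lock) can reuse __rt_mutex_slowlock_locked()
 * rather than the self-locking rt_mutex_slowlock().
 */
static int rwsem_rt_write_slowlock(struct rt_mutex_base *rtm,
				   unsigned int state)
{
	unsigned long flags;
	int ret;

	/* Same irqsave reasoning as in rt_mutex_slowlock() below. */
	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* ... writer-specific checks under wait_lock would go here ... */
	ret = __rt_mutex_slowlock_locked(rtm, state);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);

	return ret;
}

Keeping the wait_lock acquisition out of the shared helper is what makes this kind of composition possible.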
@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
 }
 
 /**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
@@ -1115,10 +1115,10 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-				       unsigned int state,
-				       struct hrtimer_sleeper *timeout,
-				       struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   unsigned int state,
+					   struct hrtimer_sleeper *timeout,
+					   struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
 
@@ -1168,52 +1168,37 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ * @chwalk:	Indicator whether full or partial chainwalk is requested
+ * @waiter:	Initializer waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-				     unsigned int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       unsigned int state,
+				       enum rtmutex_chainwalk chwalk,
+				       struct rt_mutex_waiter *waiter)
 {
-	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-	int ret = 0;
-
-	rt_mutex_init_waiter(&waiter);
+	int ret;
 
-	/*
-	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-	 * be called in early boot if the cmpxchg() fast path is disabled
-	 * (debug, no architecture support). In this case we will acquire the
-	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
-	 * enable interrupts in that early boot case. So we need to use the
-	 * irqsave/restore variants.
-	 */
-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	lockdep_assert_held(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	if (try_to_take_rt_mutex(lock, current, NULL))
 		return 0;
-	}
 
 	set_current_state(state);
 
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
 
 	if (likely(!ret))
-		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
 
 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		remove_waiter(lock, waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, waiter);
 	}
 
 	/*
@@ -1221,14 +1206,45 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * unconditionally. We might have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
+	return ret;
+}
 
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     unsigned int state)
+{
+	struct rt_mutex_waiter waiter;
+	int ret;
+
+	rt_mutex_init_waiter(&waiter);
 
-	/* Remove pending timer: */
-	if (unlikely(timeout))
-		hrtimer_cancel(&timeout->timer);
+	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
 
 	debug_rt_mutex_free_waiter(&waiter);
+	return ret;
+}
+
+/*
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ */
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     unsigned int state)
+{
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	ret = __rt_mutex_slowlock_locked(lock, state);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return ret;
 }
@@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	return rt_mutex_slowlock(lock, state);
 }
 
 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
...
@@ -342,7 +342,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
...