Commit 307bf980 authored by Thomas Gleixner, committed by Ingo Molnar

sched: Simplify mutex_spin_on_owner()

It does not make sense to rcu_read_lock/unlock() in every loop
iteration while spinning on the mutex.

Move the rcu protection outside the loop. Also simplify the
return path to always check for lock->owner == NULL which
meets the requirements of both owner changed and need_resched()
caused loop exits.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1106101458350.11814@ionos
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2a46dae3
......@@ -4306,11 +4306,8 @@ EXPORT_SYMBOL(schedule);
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
bool ret = false;
rcu_read_lock();
if (lock->owner != owner)
goto fail;
return false;
/*
* Ensure we emit the owner->on_cpu, dereference _after_ checking
......@@ -4320,11 +4317,7 @@ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
*/
barrier();
ret = owner->on_cpu;
fail:
rcu_read_unlock();
return ret;
return owner->on_cpu;
}
/*
......@@ -4336,21 +4329,21 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
if (!sched_feat(OWNER_SPIN))
return 0;
rcu_read_lock();
while (owner_running(lock, owner)) {
if (need_resched())
return 0;
break;
arch_mutex_cpu_relax();
}
rcu_read_unlock();
/*
* If the owner changed to another task there is likely
* heavy contention, stop spinning.
* We break out the loop above on need_resched() and when the
* owner changed, which is a sign for heavy contention. Return
* success only when lock->owner is NULL.
*/
if (lock->owner)
return 0;
return 1;
return lock->owner == NULL;
}
#endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.