Commit c6eb3dda authored by Peter Zijlstra, committed by Ingo Molnar

mutex: Use p->on_cpu for the adaptive spin

Since we now have p->on_cpu unconditionally available, use it to
re-implement mutex_spin_on_owner.

Requested-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.826338173@chello.nl
parent 3ca7a440
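
The change itself is simple: the old mutex_spin_on_owner() chased owner->cpu to a runqueue and compared task_thread_info(rq->curr) against the owner, while the new one just re-checks lock->owner and reads owner->on_cpu under rcu_read_lock(). A rough userspace model of the resulting spin-wait (a sketch with hypothetical names; C11 atomics stand in for the kernel's primitives, and the RCU lifetime guarantee that keeps the owner's task_struct valid is elided):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct task { atomic_bool on_cpu; };            /* models p->on_cpu   */
	struct mtx  { _Atomic(struct task *) owner; };  /* models lock->owner */

	static bool owner_running(struct mtx *lock, struct task *owner)
	{
		/*
		 * Check ownership before touching owner's fields: if the task
		 * already released the lock, its fields must not be trusted
		 * (the kernel uses rcu_read_lock() + barrier() here).
		 */
		if (atomic_load(&lock->owner) != owner)
			return false;
		return atomic_load(&owner->on_cpu);
	}

	static int spin_on_owner(struct mtx *lock, struct task *owner)
	{
		while (owner_running(lock, owner))
			;	/* kernel: bail on need_resched(), else cpu_relax() */

		/* Owner moved to another task: heavy contention, stop spinning. */
		if (atomic_load(&lock->owner) != NULL)
			return 0;

		return 1;	/* lock looks free: worth another acquisition attempt */
	}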
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -51,7 +51,7 @@ struct mutex {
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
-	struct thread_info	*owner;
+	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	DEBUG_LOCKS_WARN_ON(lock->owner != current);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 */
 	for (;;) {
-		struct thread_info *owner;
+		struct task_struct *owner;
 
 		/*
 		 * If we own the BKL, then don't spin. The owner of
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -19,7 +19,7 @@
 #ifdef CONFIG_SMP
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4173,70 +4173,53 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
-{
-	unsigned int cpu;
-	struct rq *rq;
 
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	bool ret = false;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Need to access the cpu field knowing that
-	 * DEBUG_PAGEALLOC could have unmapped it if
-	 * the mutex owner just released it and exited.
-	 */
-	if (probe_kernel_address(&owner->cpu, cpu))
-		return 0;
-#else
-	cpu = owner->cpu;
-#endif
+	rcu_read_lock();
+	if (lock->owner != owner)
+		goto fail;
 
 	/*
-	 * Even if the access succeeded (likely case),
-	 * the cpu field may no longer be valid.
+	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
+	 * lock->owner still matches owner, if that fails, owner might
+	 * point to free()d memory, if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
 	 */
-	if (cpu >= nr_cpumask_bits)
-		return 0;
+	barrier();
 
-	/*
-	 * We need to validate that we can do a
-	 * get_cpu() and that we have the percpu area.
-	 */
-	if (!cpu_online(cpu))
-		return 0;
+	ret = owner->on_cpu;
+fail:
+	rcu_read_unlock();
 
-	rq = cpu_rq(cpu);
+	return ret;
+}
 
-	for (;;) {
-		/*
-		 * Owner changed, break to re-assess state.
-		 */
-		if (lock->owner != owner) {
-			/*
-			 * If the lock has switched to a different owner,
-			 * we likely have heavy contention. Return 0 to quit
-			 * optimistic spinning and not contend further:
-			 */
-			if (lock->owner)
-				return 0;
-			break;
-		}
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	if (!sched_feat(OWNER_SPIN))
+		return 0;
 
-		/*
-		 * Is that owner really running on that cpu?
-		 */
-		if (task_thread_info(rq->curr) != owner || need_resched())
+	while (owner_running(lock, owner)) {
+		if (need_resched())
 			return 0;
 
 		arch_mutex_cpu_relax();
 	}
 
+	/*
+	 * If the owner changed to another task there is likely
+	 * heavy contention, stop spinning.
+	 */
+	if (lock->owner)
+		return 0;
+
 	return 1;
 }
 #endif
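
The barrier() in owner_running() is the subtle part. It is only a compiler barrier, but without it the compiler would be free to hoist the owner->on_cpu load above the lock->owner check, roughly (illustrative reordering, not code from the patch):

	ret = owner->on_cpu;		/* hoisted: may dereference freed memory */
	if (lock->owner != owner)	/* the guard now runs too late           */
		ret = false;

Because task_struct is freed through an RCU grace period, the check-then-dereference under rcu_read_lock() is safe, which is what lets this version drop the old probe_kernel_address() handling for CONFIG_DEBUG_PAGEALLOC.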