Commit 43b3f028 authored by Peter Zijlstra, committed by Ingo Molnar

locking/qspinlock/x86: Fix performance regression under unaccelerated VMs

Dave ran into horrible performance on a VM without PARAVIRT_SPINLOCKS
set and Linus noted that the test-and-set implementation was retarded.

One should spin on the variable with a load, not a RMW.
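
That is the classic test-and-test-and-set pattern: wait with plain loads until the lock looks free, and only then attempt the atomic RMW. Below is a minimal userspace sketch of the before/after, using C11 atomics rather than the kernel's atomic_t API; the spin_lock_rmw/spin_lock_ttas names are illustrative, not from the patch.

#include <stdatomic.h>

/* Before: every spin iteration is a cmpxchg, an RMW that keeps the
 * lock's cache line bouncing between all waiting CPUs. */
static void spin_lock_rmw(atomic_int *lock)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak(lock, &expected, 1))
		expected = 0;	/* failed CAS wrote the observed value back */
}

/* After: spin read-only (test-and-test-and-set) so all waiters can
 * share the cache line, and issue the RMW only once the lock looks
 * free. */
static void spin_lock_ttas(atomic_int *lock)
{
	int expected;

	do {
		while (atomic_load(lock) != 0)
			;	/* cpu_relax() in the kernel version */
		expected = 0;
	} while (!atomic_compare_exchange_weak(lock, &expected, 1));
}

static void spin_unlock(atomic_int *lock)
{
	atomic_store(lock, 0);
}

Under contention the load-only inner loop keeps the lock word in the shared state in every waiter's cache, while the cmpxchg loop invalidates it on each attempt.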

While there, remove 'queued' from the name, as the lock isn't queued
at all, but a simple test-and-set.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Dave Chinner <david@fromorbit.com>
Tested-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: stable@vger.kernel.org # v4.2+
Link: http://lkml.kernel.org/r/20150904152523.GR18673@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent edcd591c
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,15 +39,23 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-#define virt_queued_spin_lock virt_queued_spin_lock
+#define virt_spin_lock virt_spin_lock
 
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 
 	return true;
 }
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	return false;
 }
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (pv_enabled())
 		goto queue;
 
-	if (virt_queued_spin_lock(lock))
+	if (virt_spin_lock(lock))
 		return;
 
 	/*