Commit 9786cff3 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "Spinlock performance regression fix, plus documentation fixes"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/static_keys: Fix up the static keys documentation
  locking/qspinlock/x86: Only emit the test-and-set fallback when building guest support
  locking/qspinlock/x86: Fix performance regression under unaccelerated VMs
  locking/static_keys: Fix a silly typo
parents 1b3dfde3 1975dbc2
Documentation/static-keys.txt
@@ -15,8 +15,8 @@ The updated API replacements are:
 DEFINE_STATIC_KEY_TRUE(key);
 DEFINE_STATIC_KEY_FALSE(key);
-static_key_likely()
-statick_key_unlikely()
+static_branch_likely()
+static_branch_unlikely()
 
 0) Abstract
...
arch/x86/include/asm/qspinlock.h
@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-#define virt_queued_spin_lock virt_queued_spin_lock
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
 
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 
 	return true;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
...
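The performance fix above stops unaccelerated guests from spinning directly on atomic_cmpxchg(), which keeps the lock cacheline bouncing between waiters, and instead spins on plain reads and only attempts the cmpxchg once the lock looks free (test-and-test-and-set). The following is an illustrative user-space sketch of the two spin strategies using C11 atomics rather than the kernel's atomic_t/cpu_relax() primitives; the lock type and function names are made up for the example and are not part of the patch:

#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED 1

struct tas_lock { atomic_int val; };

/*
 * Old behaviour: every spin iteration is a cmpxchg, so each waiter keeps
 * pulling the lock cacheline exclusive even while the lock is held.
 */
static void lock_cmpxchg_spin(struct tas_lock *l)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak(&l->val, &expected, LOCKED))
		expected = 0;	/* CAS rewrote 'expected' on failure */
}

/*
 * New behaviour: wait with shared reads, then try to take the lock,
 * i.e. a test-and-test-and-set loop as in the patched virt_spin_lock().
 */
static void lock_test_and_set(struct tas_lock *l)
{
	int expected;

	do {
		while (atomic_load(&l->val) != 0)
			;	/* a cpu_relax()-style pause would go here */
		expected = 0;
	} while (!atomic_compare_exchange_weak(&l->val, &expected, LOCKED));
}

static void unlock(struct tas_lock *l)
{
	atomic_store(&l->val, 0);
}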
include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	return false;
 }
...
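The generic stub above is only compiled when an architecture has not supplied its own virt_spin_lock(), which is why the x86 header both defines the function and #defines its own name; the rename therefore has to happen in the arch header, the generic header, and the caller together. A compressed sketch of that override pattern, using a hypothetical arch_fast_path() hook rather than the real qspinlock symbols:

/* arch-specific header: provide an implementation and announce it */
#define arch_fast_path arch_fast_path
static inline bool arch_fast_path(void)
{
	return true;		/* arch-specific behaviour */
}

/* generic header, included afterwards: supply a stub only if unset */
#ifndef arch_fast_path
static inline bool arch_fast_path(void)
{
	return false;
}
#endif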
include/linux/jump_label.h
@@ -21,8 +21,8 @@
  *
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
- * static_key_likely()
- * statick_key_unlikely()
+ * static_branch_likely()
+ * static_branch_unlikely()
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
  * statement, setting the key to true requires us to patch in a jump
  * to the out-of-line of true branch.
  *
- * In addtion to static_branch_{enable,disable}, we can also reference count
+ * In addition to static_branch_{enable,disable}, we can also reference count
  * the key or branch direction via static_branch_{inc,dec}. Thus,
  * static_branch_inc() can be thought of as a 'make more true' and
- * static_branch_dec() as a 'make more false'. The inc()/dec()
- * interface is meant to be used exclusively from the inc()/dec() for a given
- * key.
+ * static_branch_dec() as a 'make more false'.
  *
  * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
...
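For reference, this is roughly how the corrected API names from the two documentation hunks are used together: a key defined with DEFINE_STATIC_KEY_FALSE(), tested with static_branch_unlikely(), and flipped on the slow path with static_branch_enable()/static_branch_disable() (or reference-counted via static_branch_inc()/static_branch_dec()). The key and the functions around it are hypothetical, purely for illustration:

#include <linux/jump_label.h>
#include <linux/types.h>

/* Hypothetical key: false by default, so the hot path stays branch-free. */
static DEFINE_STATIC_KEY_FALSE(sample_debug_key);

static void sample_debug_hook(void)
{
	/* rarely enabled extra work */
}

void sample_hot_path(void)
{
	/* Compiles to straight-line code while the key is false. */
	if (static_branch_unlikely(&sample_debug_key))
		sample_debug_hook();
}

/* Slow path: flipping the key patches every branch site machine-wide. */
void sample_debug_set(bool on)
{
	if (on)
		static_branch_enable(&sample_debug_key);
	else
		static_branch_disable(&sample_debug_key);
}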
kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (pv_enabled())
 		goto queue;
 
-	if (virt_queued_spin_lock(lock))
+	if (virt_spin_lock(lock))
 		return;
 
 	/*
...