Commit fea0e182 authored by Uros Bizjak, committed by Ingo Molnar

locking/pvqspinlock: Use try_cmpxchg() in qspinlock_paravirt.h

Use try_cmpxchg(*ptr, &old, new) instead of
cmpxchg(*ptr, old, new) == old in qspinlock_paravirt.h.
The x86 CMPXCHG instruction returns success in the ZF flag, so
this change saves a compare after the cmpxchg.

No functional change intended.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20240411192317.25432-2-ubizjak@gmail.com
parent 6a97734f
@@ -86,9 +86,10 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 	 */
 	for (;;) {
 		int val = atomic_read(&lock->val);
+		u8 old = 0;

 		if (!(val & _Q_LOCKED_PENDING_MASK) &&
-		    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
+		    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
 			lockevent_inc(pv_lock_stealing);
 			return true;
 		}
@@ -211,8 +212,9 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
 	int hopcnt = 0;

 	for_each_hash_entry(he, offset, hash) {
+		struct qspinlock *old = NULL;
 		hopcnt++;
-		if (!cmpxchg(&he->lock, NULL, lock)) {
+		if (try_cmpxchg(&he->lock, &old, lock)) {
 			WRITE_ONCE(he->node, node);
 			lockevent_pv_hop(hopcnt);
 			return &he->lock;
@@ -355,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
+	enum vcpu_state old = vcpu_halted;

 	/*
 	 * If the vCPU is indeed halted, advance its state to match that of
 	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
@@ -372,8 +374,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * subsequent writes.
 	 */
 	smp_mb__before_atomic();
-	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
-			!= vcpu_halted)
+	if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
 		return;
@@ -541,15 +542,14 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-	u8 locked;
+	u8 locked = _Q_LOCKED_VAL;

 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
-	if (likely(locked == _Q_LOCKED_VAL))
+	if (try_cmpxchg_release(&lock->locked, &locked, 0))
 		return;

 	__pv_queued_spin_unlock_slowpath(lock, locked);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment