Commit c1fb159d authored by Peter Zijlstra (Intel), committed by Ingo Molnar

locking/qspinlock: Add pending bit

Because the qspinlock needs to touch a second cacheline (the per-cpu
mcs_nodes[]); add a pending bit and allow a single in-word spinner
before we punt to the second cacheline.

It is possible to observe the pending bit without the locked bit when
the last owner has just released but the pending owner has not yet
taken ownership.

In this case we would normally queue -- because the pending bit is
already taken. However, in this case the pending bit is guaranteed
to be released 'soon', therefore wait for it and avoid queueing.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-4-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d73a3397
...@@ -36,8 +36,9 @@ typedef struct qspinlock { ...@@ -36,8 +36,9 @@ typedef struct qspinlock {
* Bitfields in the atomic value: * Bitfields in the atomic value:
* *
* 0- 7: locked byte * 0- 7: locked byte
* 8- 9: tail index * 8: pending
* 10-31: tail cpu (+1) * 9-10: tail index
* 11-31: tail cpu (+1)
*/ */
#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ #define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
<< _Q_ ## type ## _OFFSET) << _Q_ ## type ## _OFFSET)
...@@ -45,7 +46,11 @@ typedef struct qspinlock { ...@@ -45,7 +46,11 @@ typedef struct qspinlock {
#define _Q_LOCKED_BITS 8 #define _Q_LOCKED_BITS 8
#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
#define _Q_TAIL_IDX_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) #define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_PENDING_BITS 1
#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS 2 #define _Q_TAIL_IDX_BITS 2
#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) #define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
...@@ -54,5 +59,6 @@ typedef struct qspinlock { ...@@ -54,5 +59,6 @@ typedef struct qspinlock {
#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
...@@ -94,24 +94,28 @@ static inline struct mcs_spinlock *decode_tail(u32 tail) ...@@ -94,24 +94,28 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
return per_cpu_ptr(&mcs_nodes[idx], cpu); return per_cpu_ptr(&mcs_nodes[idx], cpu);
} }
#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
/** /**
* queued_spin_lock_slowpath - acquire the queued spinlock * queued_spin_lock_slowpath - acquire the queued spinlock
* @lock: Pointer to queued spinlock structure * @lock: Pointer to queued spinlock structure
* @val: Current value of the queued spinlock 32-bit word * @val: Current value of the queued spinlock 32-bit word
* *
* (queue tail, lock value) * (queue tail, pending bit, lock value)
* *
* fast : slow : unlock * fast : slow : unlock
* : : * : :
* uncontended (0,0) --:--> (0,1) --------------------------------:--> (*,0) * uncontended (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
* : | ^--------. / : * : | ^--------.------. / :
* : v \ | : * : v \ \ | :
* uncontended : (n,x) --+--> (n,0) | : * pending : (0,1,1) +--> (0,1,0) \ | :
* : | ^--' | | :
* : v | | :
* uncontended : (n,x,y) +--> (n,0,0) --' | :
* queue : | ^--' | : * queue : | ^--' | :
* : v | : * : v | :
* contended : (*,x) --+--> (*,0) -----> (*,1) ---' : * contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -' :
* queue : ^--' : * queue : ^--' :
*
*/ */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{ {
...@@ -121,6 +125,75 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) ...@@ -121,6 +125,75 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
/*
* wait for in-progress pending->locked hand-overs
*
* 0,1,0 -> 0,0,1
*/
if (val == _Q_PENDING_VAL) {
while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
cpu_relax();
}
/*
* trylock || pending
*
* 0,0,0 -> 0,0,1 ; trylock
* 0,0,1 -> 0,1,1 ; pending
*/
for (;;) {
/*
* If we observe any contention; queue.
*/
if (val & ~_Q_LOCKED_MASK)
goto queue;
new = _Q_LOCKED_VAL;
if (val == new)
new |= _Q_PENDING_VAL;
old = atomic_cmpxchg(&lock->val, val, new);
if (old == val)
break;
val = old;
}
/*
* we won the trylock
*/
if (new == _Q_LOCKED_VAL)
return;
/*
* we're pending, wait for the owner to go away.
*
* *,1,1 -> *,1,0
*/
while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
cpu_relax();
/*
* take ownership and clear the pending bit.
*
* *,1,0 -> *,0,1
*/
for (;;) {
new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
old = atomic_cmpxchg(&lock->val, val, new);
if (old == val)
break;
val = old;
}
return;
/*
* End of pending bit optimistic spinning and beginning of MCS
* queuing.
*/
queue:
node = this_cpu_ptr(&mcs_nodes[0]); node = this_cpu_ptr(&mcs_nodes[0]);
idx = node->count++; idx = node->count++;
tail = encode_tail(smp_processor_id(), idx); tail = encode_tail(smp_processor_id(), idx);
...@@ -130,15 +203,18 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) ...@@ -130,15 +203,18 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
node->next = NULL; node->next = NULL;
/* /*
* We have already touched the queueing cacheline; don't bother with
* pending stuff.
*
* trylock || xchg(lock, node) * trylock || xchg(lock, node)
* *
* 0,0 -> 0,1 ; no tail, not locked -> no tail, locked. * 0,0,0 -> 0,0,1 ; no tail, not locked -> no tail, locked.
* p,x -> n,x ; tail was p -> tail is n; preserving locked. * p,y,x -> n,y,x ; tail was p -> tail is n; preserving locked.
*/ */
for (;;) { for (;;) {
new = _Q_LOCKED_VAL; new = _Q_LOCKED_VAL;
if (val) if (val)
new = tail | (val & _Q_LOCKED_MASK); new = tail | (val & _Q_LOCKED_PENDING_MASK);
old = atomic_cmpxchg(&lock->val, val, new); old = atomic_cmpxchg(&lock->val, val, new);
if (old == val) if (old == val)
...@@ -157,7 +233,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) ...@@ -157,7 +233,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
* if there was a previous node; link it and wait until reaching the * if there was a previous node; link it and wait until reaching the
* head of the waitqueue. * head of the waitqueue.
*/ */
if (old & ~_Q_LOCKED_MASK) { if (old & ~_Q_LOCKED_PENDING_MASK) {
prev = decode_tail(old); prev = decode_tail(old);
WRITE_ONCE(prev->next, node); WRITE_ONCE(prev->next, node);
...@@ -165,18 +241,19 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) ...@@ -165,18 +241,19 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
} }
/* /*
* we're at the head of the waitqueue, wait for the owner to go away. * we're at the head of the waitqueue, wait for the owner & pending to
* go away.
* *
* *,x -> *,0 * *,x,y -> *,0,0
*/ */
while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK) while ((val = atomic_read(&lock->val)) & _Q_LOCKED_PENDING_MASK)
cpu_relax(); cpu_relax();
/* /*
* claim the lock: * claim the lock:
* *
* n,0 -> 0,1 : lock, uncontended * n,0,0 -> 0,0,1 : lock, uncontended
* *,0 -> *,1 : lock, contended * *,0,0 -> *,0,1 : lock, contended
*/ */
for (;;) { for (;;) {
new = _Q_LOCKED_VAL; new = _Q_LOCKED_VAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment