Commit e1a31e7f authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/qspinlock: store owner CPU in lock word

Store the owner CPU number in the lock word so it may be yielded to,
as powerpc's paravirtualised simple spinlocks do.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-7-npiggin@gmail.com
parent 0944534e
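
Why store the owner: under a hypervisor, a vCPU spinning on a lock whose holder has been preempted only burns its time slice. If the lock word carries the holder's CPU number, a waiter can confer its cycles to that vCPU instead, which is what the paravirtualised simple spinlocks do in splpar_spin_yield(). A minimal sketch of the decode side under the bit layout this patch defines (the helper names here are illustrative, not part of this patch; the real yield path arrives later in the series):

	static __always_inline int decode_owner_cpu(u32 val)
	{
		/* Bits 1-14 of the lock word hold the owner CPU number. */
		return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
	}

	static void yield_to_owner_sketch(struct qspinlock *lock)
	{
		u32 val = READ_ONCE(lock->val);

		if (!(val & _Q_LOCKED_VAL))
			return;

		/*
		 * Confer this vCPU's time slice to the holder here, e.g. via
		 * yield_count_of()/yield_to_preempted() as the simple
		 * spinlocks do, with decode_owner_cpu(val) as the target.
		 */
	}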
@@ -21,8 +21,15 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 	return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK);
 }
 
+static __always_inline u32 queued_spin_encode_locked_val(void)
+{
+	/* XXX: make this use lock value in paca like simple spinlocks? */
+	return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
+}
+
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
+	u32 new = queued_spin_encode_locked_val();
 	u32 prev;
 
 	asm volatile(
@@ -34,7 +41,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 "\t" PPC_ACQUIRE_BARRIER " \n"
 "2: \n"
 	: "=&r" (prev)
-	: "r" (&lock->val), "r" (_Q_LOCKED_VAL),
+	: "r" (&lock->val), "r" (new),
 	  "i" (IS_ENABLED(CONFIG_PPC64))
 	: "cr0", "memory");
@@ -43,6 +50,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 
 static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
 {
+	u32 new = queued_spin_encode_locked_val();
 	u32 prev, tmp;
 
 	/* Trylock may get ahead of queued nodes if it finds unlocked */
@@ -57,7 +65,7 @@ static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
 "\t" PPC_ACQUIRE_BARRIER " \n"
 "2: \n"
 	: "=&r" (prev), "=&r" (tmp)
-	: "r" (&lock->val), "r" (_Q_LOCKED_VAL), "r" (_Q_TAIL_CPU_MASK),
+	: "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
 	  "i" (IS_ENABLED(CONFIG_PPC64))
 	: "cr0", "memory");
...
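
Note that the asm bodies above are untouched; only the value stored on success changes, from bare _Q_LOCKED_VAL to the encoded word. Ignoring the larx/stcx. loop and barriers, the updated queued_spin_trylock() is equivalent to this sketch (illustrative only, not part of the patch):

	static __always_inline int queued_spin_trylock_sketch(struct qspinlock *lock)
	{
		u32 new = queued_spin_encode_locked_val();

		/* Succeeds only if the word was fully clear: unlocked, no tail. */
		return cmpxchg_acquire(&lock->val, 0, new) == 0;
	}

__queued_spin_trylock_steal() differs only in that it tolerates a nonzero tail field (_Q_TAIL_CPU_MASK) when testing the old value.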
@@ -29,7 +29,8 @@ typedef struct qspinlock {
  * Bitfields in the lock word:
  *
  *     0: locked bit
- *  1-15: unused bits
+ *  1-14: lock holder cpu
+ *    15: unused bit
  *    16: must queue bit
  * 17-31: tail cpu (+1)
  */
@@ -40,6 +41,15 @@ typedef struct qspinlock {
 #define _Q_LOCKED_BITS		1
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 
+/* 0x00007ffe */
+#define _Q_OWNER_CPU_OFFSET	1
+#define _Q_OWNER_CPU_BITS	14
+#define _Q_OWNER_CPU_MASK	_Q_SET_MASK(OWNER_CPU)
+
+#if CONFIG_NR_CPUS > (1U << _Q_OWNER_CPU_BITS)
+#error "qspinlock does not support such large CONFIG_NR_CPUS"
+#endif
+
 /* 0x00010000 */
 #define _Q_MUST_Q_OFFSET	16
 #define _Q_MUST_Q_BITS		1
...
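
As a sanity check on the /* 0x00007ffe */ comment, assuming _Q_SET_MASK() follows the usual (((1U << bits) - 1) << offset) pattern:

	/*
	 * _Q_OWNER_CPU_MASK = ((1U << _Q_OWNER_CPU_BITS) - 1) << _Q_OWNER_CPU_OFFSET
	 *                   = ((1U << 14) - 1) << 1
	 *                   = 0x3fff << 1
	 *                   = 0x00007ffe
	 */

which leaves bit 0 for the locked bit, bit 15 unused, and bits 16-31 for the must-queue bit and tail CPU, matching the layout comment above.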
@@ -55,7 +55,7 @@ static inline int decode_tail_cpu(u32 val)
  */
 static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail)
 {
-	u32 newval = _Q_LOCKED_VAL;
+	u32 newval = queued_spin_encode_locked_val();
 	u32 prev, tmp;
 
 	asm volatile(
...
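
To see the encoding concretely, here is a small standalone userspace mock (constants mirrored from the defines above; not kernel code) that round-trips every representable owner CPU through the lock word:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Userspace mirrors of the kernel constants (illustrative mock). */
	#define Q_LOCKED_VAL		(1u << 0)
	#define Q_OWNER_CPU_OFFSET	1
	#define Q_OWNER_CPU_BITS	14
	#define Q_OWNER_CPU_MASK	(((1u << Q_OWNER_CPU_BITS) - 1) << Q_OWNER_CPU_OFFSET)

	/* Mirrors queued_spin_encode_locked_val() for a given CPU number. */
	static uint32_t encode_locked_val(uint32_t cpu)
	{
		return Q_LOCKED_VAL | (cpu << Q_OWNER_CPU_OFFSET);
	}

	static uint32_t decode_owner_cpu(uint32_t val)
	{
		return (val & Q_OWNER_CPU_MASK) >> Q_OWNER_CPU_OFFSET;
	}

	int main(void)
	{
		/* Every CPU number that fits in 14 bits must survive the round trip. */
		for (uint32_t cpu = 0; cpu < (1u << Q_OWNER_CPU_BITS); cpu++)
			assert(decode_owner_cpu(encode_locked_val(cpu)) == cpu);

		printf("owner mask = 0x%08x\n", Q_OWNER_CPU_MASK); /* 0x00007ffe */
		return 0;
	}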