Commit d57b7835 authored by Christopher M. Riedl's avatar Christopher M. Riedl Committed by Michael Ellerman

powerpc/spinlocks: Refactor SHARED_PROCESSOR

Determining if a processor is in shared processor mode is not a constant
so don't hide it behind a #define.
Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190813031314.1828-2-cmr@informatik.wtf
parent d7fb5b18
...@@ -101,15 +101,27 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) ...@@ -101,15 +101,27 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR) #if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */ /* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock); extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock); extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */ #else /* SPLPAR */
#define __spin_yield(x) barrier() #define __spin_yield(x) barrier()
#define __rw_yield(x) barrier() #define __rw_yield(x) barrier()
#define SHARED_PROCESSOR 0
#endif #endif
static inline bool is_shared_processor(void)
{
	/*
	 * The LPPACA only exists on pseries; guard every reference to it so
	 * the other platforms that pull in this common header still build.
	 */
#ifndef CONFIG_PPC_PSERIES
	return false;
#else
	/* Shared-processor mode is only meaningful on shared LPARs. */
	return IS_ENABLED(CONFIG_PPC_SPLPAR) &&
	       lppaca_shared_proc(local_paca->lppaca_ptr);
#endif
}
static inline void arch_spin_lock(arch_spinlock_t *lock) static inline void arch_spin_lock(arch_spinlock_t *lock)
{ {
while (1) { while (1) {
...@@ -117,7 +129,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) ...@@ -117,7 +129,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
break; break;
do { do {
HMT_low(); HMT_low();
if (SHARED_PROCESSOR) if (is_shared_processor())
__spin_yield(lock); __spin_yield(lock);
} while (unlikely(lock->slock != 0)); } while (unlikely(lock->slock != 0));
HMT_medium(); HMT_medium();
...@@ -136,7 +148,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) ...@@ -136,7 +148,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
local_irq_restore(flags); local_irq_restore(flags);
do { do {
HMT_low(); HMT_low();
if (SHARED_PROCESSOR) if (is_shared_processor())
__spin_yield(lock); __spin_yield(lock);
} while (unlikely(lock->slock != 0)); } while (unlikely(lock->slock != 0));
HMT_medium(); HMT_medium();
...@@ -226,7 +238,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) ...@@ -226,7 +238,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
break; break;
do { do {
HMT_low(); HMT_low();
if (SHARED_PROCESSOR) if (is_shared_processor())
__rw_yield(rw); __rw_yield(rw);
} while (unlikely(rw->lock < 0)); } while (unlikely(rw->lock < 0));
HMT_medium(); HMT_medium();
...@@ -240,7 +252,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) ...@@ -240,7 +252,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
break; break;
do { do {
HMT_low(); HMT_low();
if (SHARED_PROCESSOR) if (is_shared_processor())
__rw_yield(rw); __rw_yield(rw);
} while (unlikely(rw->lock != 0)); } while (unlikely(rw->lock != 0));
HMT_medium(); HMT_medium();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment