Commit 2bf3604c authored by Davidlohr Bueso, committed by Michael Ellerman

powerpc/spinlock: Define smp_mb__after_spinlock only once

Instead of both queued and simple spinlocks doing it. Move
it into the arch's spinlock.h.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210309015950.27688-2-dave@stgolabs.net
parent 93c043e3
@@ -44,8 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 }
 #define queued_spin_lock queued_spin_lock
 
-#define smp_mb__after_spinlock()   smp_mb()
-
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
...
@@ -282,7 +282,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	rw_yield(lock)
 #define arch_write_relax(lock)	rw_yield(lock)
 
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock()   smp_mb()
-
 #endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
@@ -10,6 +10,9 @@
 #include <asm/simple_spinlock.h>
 #endif
 
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()   smp_mb()
+
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 static inline void pv_spinlocks_init(void) { }
 #endif
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.