Commit 00b4c907 authored by Russell King, committed by Russell King

[ARM SMP] Use event instructions for spinlocks

ARMv6K CPUs have SEV (send event) and WFE (wait for event) instructions
which allow the CPU clock to be suspended until another CPU issues a
SEV, rather than spinning on the lock wasting power.  Make use of these
instructions.

Note that WFE does not wait if an event has been sent since the last WFE
cleared the event status, so although it may look racy, the instruction
implementation ensures that these are dealt with.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent df2f5e72
...@@ -30,6 +30,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ...@@ -30,6 +30,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
__asm__ __volatile__( __asm__ __volatile__(
"1: ldrex %0, [%1]\n" "1: ldrex %0, [%1]\n"
" teq %0, #0\n" " teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
#endif
" strexeq %0, %2, [%1]\n" " strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n" " teqeq %0, #0\n"
" bne 1b" " bne 1b"
...@@ -65,7 +68,11 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ...@@ -65,7 +68,11 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
smp_mb(); smp_mb();
__asm__ __volatile__( __asm__ __volatile__(
" str %1, [%0]" " str %1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
" sev"
#endif
: :
: "r" (&lock->lock), "r" (0) : "r" (&lock->lock), "r" (0)
: "cc"); : "cc");
...@@ -87,6 +94,9 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ...@@ -87,6 +94,9 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
__asm__ __volatile__( __asm__ __volatile__(
"1: ldrex %0, [%1]\n" "1: ldrex %0, [%1]\n"
" teq %0, #0\n" " teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
#endif
" strexeq %0, %2, [%1]\n" " strexeq %0, %2, [%1]\n"
" teq %0, #0\n" " teq %0, #0\n"
" bne 1b" " bne 1b"
...@@ -122,7 +132,11 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ...@@ -122,7 +132,11 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
smp_mb(); smp_mb();
__asm__ __volatile__( __asm__ __volatile__(
"str %1, [%0]" "str %1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
" sev\n"
#endif
: :
: "r" (&rw->lock), "r" (0) : "r" (&rw->lock), "r" (0)
: "cc"); : "cc");
...@@ -148,6 +162,9 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ...@@ -148,6 +162,9 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
"1: ldrex %0, [%2]\n" "1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n" " adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n" " strexpl %1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
" wfemi\n"
#endif
" rsbpls %0, %1, #0\n" " rsbpls %0, %1, #0\n"
" bmi 1b" " bmi 1b"
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2)
...@@ -169,6 +186,11 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ...@@ -169,6 +186,11 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
" strex %1, %0, [%2]\n" " strex %1, %0, [%2]\n"
" teq %1, #0\n" " teq %1, #0\n"
" bne 1b" " bne 1b"
#ifdef CONFIG_CPU_32v6K
"\n cmp %0, #0\n"
" mcreq p15, 0, %0, c7, c10, 4\n"
" seveq"
#endif
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock) : "r" (&rw->lock)
: "cc"); : "cc");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment