Commit aee9a554 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,arm: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Requires the asm_op because of eor.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Chen Gang <gang.chen@asianux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Albin Tonnerre <albin.tonnerre@arm.com>
Cc: Victor Kamensky <victor.kamensky@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20140508135851.939725247@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f7d11e93
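Background for the diff that follows: the patch folds four near-identical function bodies per type into two macro bodies (ATOMIC_OP and ATOMIC_OP_RETURN) that are instantiated once per operation. The user-space sketch below shows the same CPP folding technique in miniature; it is illustrative only, every my_-prefixed name is invented for this sketch, and none of it is kernel code.

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* One body per flavour; the op-specific pieces arrive as macro arguments. */
#define MY_ATOMIC_OP(op, c_op)                                          \
static inline void my_atomic_##op(int i, my_atomic_t *v)                \
{                                                                       \
        v->counter c_op i;      /* becomes "+= i" for op=add */         \
}

#define MY_ATOMIC_OP_RETURN(op, c_op)                                   \
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)        \
{                                                                       \
        v->counter c_op i;                                              \
        return v->counter;                                              \
}

#define MY_ATOMIC_OPS(op, c_op)                                         \
        MY_ATOMIC_OP(op, c_op)                                          \
        MY_ATOMIC_OP_RETURN(op, c_op)

MY_ATOMIC_OPS(add, +=)  /* generates my_atomic_add(), my_atomic_add_return() */
MY_ATOMIC_OPS(sub, -=)  /* generates my_atomic_sub(), my_atomic_sub_return() */

int main(void)
{
        my_atomic_t v = { .counter = 0 };

        my_atomic_add(5, &v);
        printf("%d\n", my_atomic_sub_return(2, &v));    /* prints 3 */
        return 0;
}

Built with a plain C compiler (e.g. gcc -std=c99), the two MY_ATOMIC_OPS() lines expand into four functions, mirroring how the patch generates atomic_add(), atomic_add_return(), atomic_sub() and atomic_sub_return() from ATOMIC_OPS(add, +=, add) and ATOMIC_OPS(sub, -=, sub).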
arch/arm/include/asm/atomic.h

@@ -37,84 +37,47 @@
  * store exclusive to ensure that these are atomic. We may loop
  * to ensure that the update happens.
  */
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        prefetchw(&v->counter);
-        __asm__ __volatile__("@ atomic_add\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        smp_mb();
-        prefetchw(&v->counter);
-
-        __asm__ __volatile__("@ atomic_add_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        prefetchw(&v->counter);
-        __asm__ __volatile__("@ atomic_sub\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        smp_mb();
-        prefetchw(&v->counter);
-
-        __asm__ __volatile__("@ atomic_sub_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
+#define ATOMIC_OP(op, c_op, asm_op)                                     \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        unsigned long tmp;                                              \
+        int result;                                                     \
+                                                                        \
+        prefetchw(&v->counter);                                         \
+        __asm__ __volatile__("@ atomic_" #op "\n"                       \
+"1:     ldrex   %0, [%3]\n"                                             \
+"       " #asm_op "     %0, %0, %4\n"                                   \
+"       strex   %1, %0, [%3]\n"                                         \
+"       teq     %1, #0\n"                                               \
+"       bne     1b"                                                     \
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
+        : "r" (&v->counter), "Ir" (i)                                   \
+        : "cc");                                                        \
+}                                                                       \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        unsigned long tmp;                                              \
+        int result;                                                     \
+                                                                        \
+        smp_mb();                                                       \
+        prefetchw(&v->counter);                                         \
+                                                                        \
+        __asm__ __volatile__("@ atomic_" #op "_return\n"                \
+"1:     ldrex   %0, [%3]\n"                                             \
+"       " #asm_op "     %0, %0, %4\n"                                   \
+"       strex   %1, %0, [%3]\n"                                         \
+"       teq     %1, #0\n"                                               \
+"       bne     1b"                                                     \
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
+        : "r" (&v->counter), "Ir" (i)                                   \
+        : "cc");                                                        \
+                                                                        \
+        smp_mb();                                                       \
+                                                                        \
+        return result;                                                  \
+}
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long flags;
-        int val;
-
-        raw_local_irq_save(flags);
-        val = v->counter;
-        v->counter = val += i;
-        raw_local_irq_restore(flags);
-
-        return val;
-}
-#define atomic_add(i, v)        (void) atomic_add_return(i, v)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long flags;
-        int val;
-
-        raw_local_irq_save(flags);
-        val = v->counter;
-        v->counter = val -= i;
-        raw_local_irq_restore(flags);
-
-        return val;
-}
-#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)
+#define ATOMIC_OP(op, c_op, asm_op)                                     \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        unsigned long flags;                                            \
+                                                                        \
+        raw_local_irq_save(flags);                                      \
+        v->counter c_op i;                                              \
+        raw_local_irq_restore(flags);                                   \
+}                                                                       \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        unsigned long flags;                                            \
+        int val;                                                        \
+                                                                        \
+        raw_local_irq_save(flags);                                      \
+        v->counter c_op i;                                              \
+        val = v->counter;                                               \
+        raw_local_irq_restore(flags);                                   \
+                                                                        \
+        return val;                                                     \
+}
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define ATOMIC_OPS(op, c_op, asm_op)                                    \
+        ATOMIC_OP(op, c_op, asm_op)                                     \
+        ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 #define atomic_xchg(v, new)     (xchg(&((v)->counter), new))
 
 #define atomic_inc(v)           atomic_add(1, v)
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 }
 #endif
 
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-        long long result;
-        unsigned long tmp;
-
-        prefetchw(&v->counter);
-        __asm__ __volatile__("@ atomic64_add\n"
-"1:     ldrexd  %0, %H0, [%3]\n"
-"       adds    %Q0, %Q0, %Q4\n"
-"       adc     %R0, %R0, %R4\n"
-"       strexd  %1, %0, %H0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "r" (i)
-        : "cc");
-}
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
-        long long result;
-        unsigned long tmp;
-
-        smp_mb();
-        prefetchw(&v->counter);
-
-        __asm__ __volatile__("@ atomic64_add_return\n"
-"1:     ldrexd  %0, %H0, [%3]\n"
-"       adds    %Q0, %Q0, %Q4\n"
-"       adc     %R0, %R0, %R4\n"
-"       strexd  %1, %0, %H0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "r" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
-        long long result;
-        unsigned long tmp;
-
-        prefetchw(&v->counter);
-        __asm__ __volatile__("@ atomic64_sub\n"
-"1:     ldrexd  %0, %H0, [%3]\n"
-"       subs    %Q0, %Q0, %Q4\n"
-"       sbc     %R0, %R0, %R4\n"
-"       strexd  %1, %0, %H0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "r" (i)
-        : "cc");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-        long long result;
-        unsigned long tmp;
-
-        smp_mb();
-        prefetchw(&v->counter);
-
-        __asm__ __volatile__("@ atomic64_sub_return\n"
-"1:     ldrexd  %0, %H0, [%3]\n"
-"       subs    %Q0, %Q0, %Q4\n"
-"       sbc     %R0, %R0, %R4\n"
-"       strexd  %1, %0, %H0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "r" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
+#define ATOMIC64_OP(op, op1, op2)                                       \
+static inline void atomic64_##op(long long i, atomic64_t *v)            \
+{                                                                       \
+        long long result;                                               \
+        unsigned long tmp;                                              \
+                                                                        \
+        prefetchw(&v->counter);                                         \
+        __asm__ __volatile__("@ atomic64_" #op "\n"                     \
+"1:     ldrexd  %0, %H0, [%3]\n"                                        \
+"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
+"       " #op2 " %R0, %R0, %R4\n"                                       \
+"       strexd  %1, %0, %H0, [%3]\n"                                    \
+"       teq     %1, #0\n"                                               \
+"       bne     1b"                                                     \
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
+        : "r" (&v->counter), "r" (i)                                    \
+        : "cc");                                                        \
+}                                                                       \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2)                                \
+static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+{                                                                       \
+        long long result;                                               \
+        unsigned long tmp;                                              \
+                                                                        \
+        smp_mb();                                                       \
+        prefetchw(&v->counter);                                         \
+                                                                        \
+        __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
+"1:     ldrexd  %0, %H0, [%3]\n"                                        \
+"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
+"       " #op2 " %R0, %R0, %R4\n"                                       \
+"       strexd  %1, %0, %H0, [%3]\n"                                    \
+"       teq     %1, #0\n"                                               \
+"       bne     1b"                                                     \
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
+        : "r" (&v->counter), "r" (i)                                    \
+        : "cc");                                                        \
+                                                                        \
+        smp_mb();                                                       \
+                                                                        \
+        return result;                                                  \
+}
+
+#define ATOMIC64_OPS(op, op1, op2)                                      \
+        ATOMIC64_OP(op, op1, op2)                                       \
+        ATOMIC64_OP_RETURN(op, op1, op2)
+
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
                                          long long new)
...
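A note on the changelog's "Requires the asm_op because of eor" line: for add and sub the operation name happens to match the ARM mnemonic, so the third macro argument looks redundant above, but it is not in general. A hypothetical later addition of bitwise ops (not part of this commit) would have to pass a mnemonic that differs from the op name, roughly:

        ATOMIC_OP(and, &=, and)
        ATOMIC_OP(or,  |=, orr)
        ATOMIC_OP(xor, ^=, eor)         /* C operator is ^=, ARM instruction is eor */

The pre-ARMv6 UP variant only consumes c_op, while the LDREX/STREX variant only consumes asm_op, which is why both are threaded through the shared ATOMIC_OPS() wrapper.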