Commit 6822a84d authored by Will Deacon, committed by Ingo Molnar

locking/atomic, arch/arm64: Generate LSE non-return cases using common macros

atomic[64]_{add,and,andnot,or,xor} all follow the same patterns, so
generate them using macros, like we do for the LL/SC case already.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steve Capper <steve.capper@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1461344493-8262-1-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e490f9b1
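
For illustration (not part of the commit message): instantiating the new macro as ATOMIC_OP(add, stadd) should expand to essentially the open-coded atomic_add() being removed in the diff below. The "#asm_op" stringification splices the mnemonic into the inline-asm template, and the adjacent string literals are then concatenated, giving roughly:

static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

So each ATOMIC_OP()/ATOMIC64_OP() invocation in the new code stands in for one of the removed functions.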
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -26,54 +26,25 @@
 #endif
 
 #define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
-
-static inline void atomic_andnot(int i, atomic_t *v)
-{
-	register int w0 asm ("w0") = i;
-	register atomic_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
-	"	stclr	%w[i], %[v]\n")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
-
-static inline void atomic_or(int i, atomic_t *v)
-{
-	register int w0 asm ("w0") = i;
-	register atomic_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
-	"	stset	%w[i], %[v]\n")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
-
-static inline void atomic_xor(int i, atomic_t *v)
-{
-	register int w0 asm ("w0") = i;
-	register atomic_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
-	"	steor	%w[i], %[v]\n")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-	register int w0 asm ("w0") = i;
-	register atomic_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
-	"	stadd	%w[i], %[v]\n")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
+#define ATOMIC_OP(op, asm_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	register int w0 asm ("w0") = i;					\
+	register atomic_t *x1 asm ("x1") = v;				\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
+"	" #asm_op "	%w[i], %[v]\n")					\
+	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
+	: "r" (x1)							\
+	: __LL_SC_CLOBBERS);						\
+}
+
+ATOMIC_OP(andnot, stclr)
+ATOMIC_OP(or, stset)
+ATOMIC_OP(xor, steor)
+ATOMIC_OP(add, stadd)
+
+#undef ATOMIC_OP
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
 static inline int atomic_add_return##name(int i, atomic_t *v)		\
@@ -167,54 +138,25 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 
 #undef __LL_SC_ATOMIC
 #define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
-
-static inline void atomic64_andnot(long i, atomic64_t *v)
-{
-	register long x0 asm ("x0") = i;
-	register atomic64_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
-	"	stclr	%[i], %[v]\n")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
-
-static inline void atomic64_or(long i, atomic64_t *v)
-{
-	register long x0 asm ("x0") = i;
-	register atomic64_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
-	"	stset	%[i], %[v]\n")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
-
-static inline void atomic64_xor(long i, atomic64_t *v)
-{
-	register long x0 asm ("x0") = i;
-	register atomic64_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
-	"	steor	%[i], %[v]\n")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
-
-static inline void atomic64_add(long i, atomic64_t *v)
-{
-	register long x0 asm ("x0") = i;
-	register atomic64_t *x1 asm ("x1") = v;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
-	"	stadd	%[i], %[v]\n")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
-	: "r" (x1)
-	: __LL_SC_CLOBBERS);
-}
+#define ATOMIC64_OP(op, asm_op)						\
+static inline void atomic64_##op(long i, atomic64_t *v)		\
+{									\
+	register long x0 asm ("x0") = i;				\
+	register atomic64_t *x1 asm ("x1") = v;				\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
+"	" #asm_op "	%[i], %[v]\n")					\
+	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
+	: "r" (x1)							\
+	: __LL_SC_CLOBBERS);						\
+}
+
+ATOMIC64_OP(andnot, stclr)
+ATOMIC64_OP(or, stset)
+ATOMIC64_OP(xor, steor)
+ATOMIC64_OP(add, stadd)
+
+#undef ATOMIC64_OP
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
 static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
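
As a rough standalone sketch of the same generate-by-macro pattern (assumptions: userspace C with GCC or Clang; the LSE inline asm is swapped for __atomic builtins so it builds on any architecture, and the demo_atomic_* names are invented for this example, not kernel API):

#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

/*
 * Generate one non-return atomic helper per (name, builtin) pair,
 * mirroring how ATOMIC_OP(op, asm_op) stamps out atomic_##op() above.
 */
#define DEMO_ATOMIC_OP(op, builtin)					\
static inline void demo_atomic_##op(int i, demo_atomic_t *v)		\
{									\
	builtin(&v->counter, i, __ATOMIC_RELAXED);			\
}

DEMO_ATOMIC_OP(add, __atomic_fetch_add)
DEMO_ATOMIC_OP(or,  __atomic_fetch_or)
DEMO_ATOMIC_OP(xor, __atomic_fetch_xor)
DEMO_ATOMIC_OP(and, __atomic_fetch_and)

#undef DEMO_ATOMIC_OP

int main(void)
{
	demo_atomic_t v = { .counter = 1 };

	demo_atomic_add(2, &v);		/* counter: 1 -> 3 */
	demo_atomic_or(4, &v);		/* counter: 3 -> 7 */
	demo_atomic_xor(1, &v);		/* counter: 7 -> 6 */
	printf("%d\n", v.counter);	/* prints 6 */
	return 0;
}

The benefit is the same one the patch is after: the boilerplate (argument registers, asm template, constraint lists) is written once, and each one-line invocation supplies only the operation name and the instruction that differs.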