Commit 5aad6cda authored by Will Deacon

arm64: atomics: Undefine internal macros after use

We use a bunch of internal macros when constructing our atomic and
cmpxchg routines in order to save on boilerplate. Avoid exposing these
directly to users of the header files.
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent b32baf91
arch/arm64/include/asm/atomic.h

@@ -32,6 +32,7 @@ ATOMIC_OP(atomic_add)
 ATOMIC_OP(atomic_and)
 ATOMIC_OP(atomic_sub)
 
+#undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)                                      \
 static inline int arch_##op##name(int i, atomic_t *v)                  \
@@ -54,6 +55,8 @@ ATOMIC_FETCH_OPS(atomic_fetch_sub)
 ATOMIC_FETCH_OPS(atomic_add_return)
 ATOMIC_FETCH_OPS(atomic_sub_return)
 
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)                                                \
 static inline void arch_##op(long i, atomic64_t *v)                    \
@@ -68,6 +71,7 @@ ATOMIC64_OP(atomic64_add)
 ATOMIC64_OP(atomic64_and)
 ATOMIC64_OP(atomic64_sub)
 
+#undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)                                    \
 static inline long arch_##op##name(long i, atomic64_t *v)              \
@@ -90,6 +94,9 @@ ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
 ATOMIC64_FETCH_OPS(atomic64_add_return)
 ATOMIC64_FETCH_OPS(atomic64_sub_return)
 
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
 static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	return __lse_ll_sc_body(atomic64_dec_if_positive, v);

arch/arm64/include/asm/cmpxchg.h

@@ -129,6 +129,8 @@ __CMPXCHG_CASE(mb_, 16)
 __CMPXCHG_CASE(mb_, 32)
 __CMPXCHG_CASE(mb_, 64)
 
+#undef __CMPXCHG_CASE
+
 #define __CMPXCHG_DBL(name)                                            \
 static inline long __cmpxchg_double##name(unsigned long old1,          \
                                           unsigned long old2,          \
@@ -143,6 +145,8 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
 __CMPXCHG_DBL( )
 __CMPXCHG_DBL(_mb)
 
+#undef __CMPXCHG_DBL
+
 #define __CMPXCHG_GEN(sfx)                                             \
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr,         \
                                            unsigned long old,          \
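As an aside for readers unfamiliar with the idiom: below is a minimal, self-contained sketch of the generate-then-#undef pattern this commit enforces. The names (counter.h, COUNTER_OP, ctr_add, ctr_sub) are invented for illustration and do not appear in the kernel; only the discipline of undefining the generator macro mirrors the change above.

/* counter.h -- illustrative sketch only, not kernel code */
#ifndef COUNTER_H
#define COUNTER_H

/* Internal generator macro: stamps out one inline helper per operation. */
#define COUNTER_OP(op, c_op)                                           \
static inline void ctr_##op(int i, int *v)                             \
{                                                                      \
        *v = *v c_op i;                                                \
}

COUNTER_OP(add, +)
COUNTER_OP(sub, -)

/*
 * Undefine the generator once the functions exist. Without this,
 * every file including counter.h would also see COUNTER_OP and could
 * clash with an unrelated macro of the same name; with it, only the
 * generated ctr_add()/ctr_sub() remain visible to users.
 */
#undef COUNTER_OP

#endif /* COUNTER_H */

A user of the header then calls the generated functions directly, e.g. ctr_add(2, &v), while any reference to COUNTER_OP outside the header fails at compile time -- the same exposure the patch above closes off for ATOMIC_OP, __CMPXCHG_CASE and friends.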