Commit 0ca326de authored by Will Deacon, committed by Ingo Molnar

locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations

By defining our SMP atomics in terms of relaxed operations, we gain
a small reduction in code size and have acquire/release/fence variants
generated automatically by the core code.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cd074aea
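
For readers unfamiliar with the _relaxed() machinery referenced in the commit message: once an architecture supplies only the relaxed (unordered) primitives, the core code in include/linux/atomic.h derives the acquire, release and fully ordered variants by bracketing the relaxed call with barriers. A simplified sketch of that pattern follows; the macro names here are illustrative, not the verbatim upstream definitions.

/*
 * Simplified sketch of how the core code builds ordered variants from a
 * _relaxed() primitive; the macro names are illustrative, the real ones
 * live in include/linux/atomic.h.
 */
#define __op_acquire(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();		/* order against later accesses */ \
	__ret;								\
})

#define __op_release(op, args...)					\
({									\
	smp_mb__before_atomic();	/* order against earlier accesses */ \
	op##_relaxed(args);						\
})

#define __op_fence(op, args...)						\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* e.g. a fully ordered variant is then generated roughly as: */
#define atomic_add_return(...)	__op_fence(atomic_add_return, __VA_ARGS__)

On ARM these wrappers amount to the same smp_mb() calls the open-coded barriers removed below provided, so callers of the non-relaxed APIs see unchanged ordering while the arch code shrinks.
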
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 }									\
 									\
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	: "r" (&v->counter), "Ir" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }

-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
 	int oldval;
 	unsigned long res;

-	smp_mb();
 	prefetchw(&ptr->counter);

 	do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 		: "cc");
 	} while (res);

-	smp_mb();
-
 	return oldval;
 }
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -297,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v)	\
 }									\
 									\
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long						\
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
 {									\
 	long long result;						\
 	unsigned long tmp;						\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
@@ -316,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }

@@ -328,6 +324,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)

+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
 #define atomic64_andnot atomic64_andnot

 ATOMIC64_OP(and, and, and)
@@ -339,13 +338,12 @@ ATOMIC64_OP(xor, eor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-					long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
 	long long oldval;
 	unsigned long res;

-	smp_mb();
 	prefetchw(&ptr->counter);

 	do {
@@ -360,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 		: "cc");
 	} while (res);

-	smp_mb();
-
 	return oldval;
 }
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed

-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
 	long long result;
 	unsigned long tmp;

-	smp_mb();
 	prefetchw(&ptr->counter);

 	__asm__ __volatile__("@ atomic64_xchg\n"
@@ -382,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");

-	smp_mb();
-
 	return result;
 }
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed

 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
...
@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	unsigned int tmp;
 #endif

-	smp_mb();
 	prefetchw((const void *)ptr);

 	switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
-	smp_mb();

 	return ret;
 }

-#define xchg(ptr, x) ({							\
+#define xchg_relaxed(ptr, x) ({						\
 	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
 				   sizeof(*(ptr)));			\
 })
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #error "SMP is not supported on this platform"
 #endif

+#define xchg xchg_relaxed
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return oldval;
 }

-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
-					 (unsigned long)(o),		\
-					 (unsigned long)(n),		\
-					 sizeof(*(ptr)));		\
+#define cmpxchg_relaxed(ptr,o,n) ({					\
+	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
+				      (unsigned long)(o),		\
+				      (unsigned long)(n),		\
+				      sizeof(*(ptr)));			\
 })

 static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,

 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))

-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					   (unsigned long long)(o),	\
-					   (unsigned long long)(n));	\
-})
-
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */

 #endif /* __ASM_ARM_CMPXCHG_H */
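
Note that cmpxchg(), cmpxchg64() and xchg() remain fully ordered for their callers; only their definitions move out of the arch header. With the arch now providing cmpxchg_relaxed() and friends, the core code brackets the relaxed primitive with full barriers, which is equivalent to the __cmpxchg_mb() helper removed above. A rough illustration, using a hypothetical function name:

/*
 * Illustration of the ordering a caller of the fully ordered cmpxchg()
 * still gets on ARM after this patch; it mirrors the removed
 * __cmpxchg_mb(). The real wrapper is generated by the core code.
 */
static inline unsigned long cmpxchg_mb_sketch(volatile void *ptr,
					      unsigned long old,
					      unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();				/* leading full barrier */
	ret = __cmpxchg(ptr, old, new, size);	/* the relaxed primitive */
	smp_mb();				/* trailing full barrier */

	return ret;
}
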