Commit dcfd7615 authored by Peter Zijlstra, committed by Luis Henriques

locking,arch,arc: Fold atomic_ops

commit f7d11e93 upstream.

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Link: http://lkml.kernel.org/r/20140508135851.886055622@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
[ luis: 3.16 prereq for:
  2576c28e "ARC: add smp barriers around atomics per Documentation/atomic_ops.txt" ]
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 8e4a1999
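
For illustration, here is a minimal userspace sketch of the folding pattern this commit introduces. The macro names mirror the kernel's, but atomic_t, atomic_ops_lock() and atomic_ops_unlock() are modelled with a plain pthread mutex purely for demonstration; this is an assumption-laden model of the spinlock variant in the diff below, not the kernel code itself.

/* atomic_fold_demo.c -- illustrative userspace model only, not kernel code. */
#include <pthread.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
#define atomic_ops_lock()	pthread_mutex_lock(&demo_lock)
#define atomic_ops_unlock()	pthread_mutex_unlock(&demo_lock)

/* One template generates the void op ... */
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	atomic_ops_lock();						\
	v->counter c_op i;						\
	atomic_ops_unlock();						\
}

/* ... and one generates the value-returning variant. */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int temp;							\
									\
	atomic_ops_lock();						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock();						\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)	/* emits atomic_add() and atomic_add_return() */
ATOMIC_OPS(sub, -=)	/* emits atomic_sub() and atomic_sub_return() */
ATOMIC_OP(and, &=)	/* a new op is a single extra line */

int main(void)
{
	atomic_t v = { .counter = 5 };

	atomic_add(3, &v);
	atomic_and(0x6, &v);				/* 8 & 6 == 0 */
	printf("%d\n", atomic_sub_return(-4, &v));	/* prints 4 */
	return 0;
}

The point of the pattern is visible in the instantiation lines: adding a new operation becomes a one-line macro invocation instead of another hand-written function, which is what the commit message means by "prepares for easy addition of new ops".
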
@@ -25,79 +25,36 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned int temp;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	add     %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
-	: "r"(&v->counter), "ir"(i)
-	: "cc");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned int temp;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	sub     %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(&v->counter), "ir"(i)
-	: "cc");
-}
-
-/* add and also return the new value */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned int temp;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	add     %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(&v->counter), "ir"(i)
-	: "cc");
-
-	return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned int temp;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	sub     %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(&v->counter), "ir"(i)
-	: "cc");
-
-	return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned int temp;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bic     %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b	\n"
-	: "=&r"(temp)
-	: "r"(addr), "ir"(mask)
-	: "cc");
-}
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned int temp;						\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock   %0, [%1]	\n"				\
+	"	" #asm_op " %0, %0, %2	\n"				\
+	"	scond   %0, [%1]	\n"				\
+	"	bnz     1b	\n"					\
+	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
+	: "r"(&v->counter), "ir"(i)					\
+	: "cc");							\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned int temp;						\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock   %0, [%1]	\n"				\
+	"	" #asm_op " %0, %0, %2	\n"				\
+	"	scond   %0, [%1]	\n"				\
+	"	bnz     1b	\n"					\
+	: "=&r"(temp)							\
+	: "r"(&v->counter), "ir"(i)					\
+	: "cc");							\
+									\
+	return temp;							\
+}
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
@@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	v->counter = i;
 	atomic_ops_unlock(flags);
 }
+
 #endif
 
 /*
@@ -133,62 +91,46 @@ static inline void atomic_set(atomic_t *v, int i)
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
  */
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	atomic_ops_lock(flags);
-	v->counter += i;
-	atomic_ops_unlock(flags);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	atomic_ops_lock(flags);
-	v->counter -= i;
-	atomic_ops_unlock(flags);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	unsigned long temp;
-
-	atomic_ops_lock(flags);
-	temp = v->counter;
-	temp += i;
-	v->counter = temp;
-	atomic_ops_unlock(flags);
-
-	return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	unsigned long temp;
-
-	atomic_ops_lock(flags);
-	temp = v->counter;
-	temp -= i;
-	v->counter = temp;
-	atomic_ops_unlock(flags);
-
-	return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	atomic_ops_lock(flags);
-	*addr &= ~mask;
-	atomic_ops_unlock(flags);
-}
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	atomic_ops_lock(flags);						\
+	v->counter c_op i;						\
+	atomic_ops_unlock(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	unsigned long temp;						\
+									\
+	atomic_ops_lock(flags);						\
+	temp = v->counter;						\
+	temp c_op i;							\
+	v->counter = temp;						\
+	atomic_ops_unlock(flags);					\
+									\
+	return temp;							\
+}
 
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OP(and, &=, and)
+
+#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is a given value
......
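
For reference, hand-expanding ATOMIC_OP(and, &=, and) against the LL/SC ATOMIC_OP template in the hunk above yields (modulo whitespace) roughly the function below; this is what lets atomic_clear_mask() collapse into a one-line wrapper around atomic_and().

/* Approximate expansion of ATOMIC_OP(and, &=, and) for the LL/SC case. */
static inline void atomic_and(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	and %0, %0, %2	\n"	/* #asm_op stringified into the asm template */
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
	: "r"(&v->counter), "ir"(i)
	: "cc");
}

/* atomic_clear_mask(mask, v) then simply expands to atomic_and(~(mask), (v)). */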