Commit d6dfe250 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

locking,arch,metag: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: James Hogan <james.hogan@imgtec.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-metag@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.453864110@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d839bae4
...@@ -27,85 +27,56 @@ static inline int atomic_read(const atomic_t *v) ...@@ -27,85 +27,56 @@ static inline int atomic_read(const atomic_t *v)
return temp; return temp;
} }
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
int temp; { \
int temp; \
asm volatile ( \
"1: LNKGETD %0, [%1]\n" asm volatile ( \
" ADD %0, %0, %2\n" "1: LNKGETD %0, [%1]\n" \
" LNKSETD [%1], %0\n" " " #op " %0, %0, %2\n" \
" DEFR %0, TXSTAT\n" " LNKSETD [%1], %0\n" \
" ANDT %0, %0, #HI(0x3f000000)\n" " DEFR %0, TXSTAT\n" \
" CMPT %0, #HI(0x02000000)\n" " ANDT %0, %0, #HI(0x3f000000)\n" \
" BNZ 1b\n" " CMPT %0, #HI(0x02000000)\n" \
: "=&d" (temp) " BNZ 1b\n" \
: "da" (&v->counter), "bd" (i) : "=&d" (temp) \
: "cc"); : "da" (&v->counter), "bd" (i) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int result, temp; \
\
smp_mb(); \
\
asm volatile ( \
"1: LNKGETD %1, [%2]\n" \
" " #op " %1, %1, %3\n" \
" LNKSETD [%2], %1\n" \
" DEFR %0, TXSTAT\n" \
" ANDT %0, %0, #HI(0x3f000000)\n" \
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
: "=&d" (temp), "=&da" (result) \
: "da" (&v->counter), "bd" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
} }
static inline void atomic_sub(int i, atomic_t *v) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
{
int temp;
asm volatile ( ATOMIC_OPS(add)
"1: LNKGETD %0, [%1]\n" ATOMIC_OPS(sub)
" SUB %0, %0, %2\n"
" LNKSETD [%1], %0\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp)
: "da" (&v->counter), "bd" (i)
: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v) #undef ATOMIC_OPS
{ #undef ATOMIC_OP_RETURN
int result, temp; #undef ATOMIC_OP
smp_mb();
asm volatile (
"1: LNKGETD %1, [%2]\n"
" ADD %1, %1, %3\n"
" LNKSETD [%2], %1\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp), "=&da" (result)
: "da" (&v->counter), "bd" (i)
: "cc");
smp_mb();
return result;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
int result, temp;
smp_mb();
asm volatile (
"1: LNKGETD %1, [%2]\n"
" SUB %1, %1, %3\n"
" LNKSETD [%2], %1\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp), "=&da" (result)
: "da" (&v->counter), "bd" (i)
: "cc");
smp_mb();
return result;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
...@@ -37,55 +37,41 @@ static inline int atomic_set(atomic_t *v, int i) ...@@ -37,55 +37,41 @@ static inline int atomic_set(atomic_t *v, int i)
return i; return i;
} }
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op, c_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned long flags; { \
unsigned long flags; \
__global_lock1(flags); \
fence(); __global_lock1(flags); \
v->counter += i; fence(); \
__global_unlock1(flags); v->counter c_op i; \
__global_unlock1(flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long result; \
unsigned long flags; \
\
__global_lock1(flags); \
result = v->counter; \
result c_op i; \
fence(); \
v->counter = result; \
__global_unlock1(flags); \
\
return result; \
} }
static inline void atomic_sub(int i, atomic_t *v) #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
{
unsigned long flags;
__global_lock1(flags); ATOMIC_OPS(add, +=)
fence(); ATOMIC_OPS(sub, -=)
v->counter -= i;
__global_unlock1(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long result;
unsigned long flags;
__global_lock1(flags); #undef ATOMIC_OPS
result = v->counter; #undef ATOMIC_OP_RETURN
result += i; #undef ATOMIC_OP
fence();
v->counter = result;
__global_unlock1(flags);
return result;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long result;
unsigned long flags;
__global_lock1(flags);
result = v->counter;
result -= i;
fence();
v->counter = result;
__global_unlock1(flags);
return result;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment