Commit af095dd6 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,powerpc: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

Requires asm_op because PPC asm is weird :-)
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20140508135852.713980957@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 15e3f6d7
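
The "weird" part is that the C-level name of an operation and the PPC mnemonic are not always the same token: atomic_sub() has to emit the subf (subtract-from) instruction. The macros introduced below therefore take both an op and an asm_op argument, pasting the former into the function name with ## and stringizing the latter into the asm template with #. As a purely illustrative, stand-alone sketch of that preprocessor pattern (DEMO_OP, demo_add and demo_sub are hypothetical names; printf stands in for the inline asm):

#include <stdio.h>

/* Stand-alone sketch (not kernel code) of the CPP pattern the patch uses:
 * ##-pasting forms the C function name from "op", while #-stringizing
 * splices "asm_op" into the template string.  Two parameters are needed
 * because the C name and the PPC mnemonic can differ.
 */
#define DEMO_OP(op, asm_op) \
static void demo_##op(void) \
{ \
	printf("atomic_" #op " would emit: " #asm_op " %%0,%%2,%%0\n"); \
}

DEMO_OP(add, add)	/* defines demo_add(); mnemonic is "add"  */
DEMO_OP(sub, subf)	/* defines demo_sub(); mnemonic is "subf" */

int main(void)
{
	demo_add();
	demo_sub();
	return 0;
}
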
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
+#define ATOMIC_OP(op, asm_op) \
+static __inline__ void atomic_##op(int a, atomic_t *v) \
+{ \
+	int t; \
+ \
+	__asm__ __volatile__( \
+"1:	lwarx	%0,0,%3		# atomic_" #op "\n" \
+	#asm_op " %0,%2,%0\n" \
+	PPC405_ERR77(0,%3) \
+"	stwcx.	%0,0,%3 \n" \
+"	bne-	1b\n" \
+	: "=&r" (t), "+m" (v->counter) \
+	: "r" (a), "r" (&v->counter) \
+	: "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
+{ \
+	int t; \
+ \
+	__asm__ __volatile__( \
+	PPC_ATOMIC_ENTRY_BARRIER \
+"1:	lwarx	%0,0,%2		# atomic_" #op "_return\n" \
+	#asm_op " %0,%1,%0\n" \
+	PPC405_ERR77(0,%2) \
+"	stwcx.	%0,0,%2 \n" \
+"	bne-	1b\n" \
+	PPC_ATOMIC_EXIT_BARRIER \
+	: "=&r" (t) \
+	: "r" (a), "r" (&v->counter) \
+	: "cc", "memory"); \
+ \
+	return t; \
+}
+
+#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic64_add(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_add\n\
-	add	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ long atomic64_add_return(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
-	add	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-
-static __inline__ void atomic64_sub(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_sub\n\
-	subf	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
-	subf	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
+#define ATOMIC64_OP(op, asm_op) \
+static __inline__ void atomic64_##op(long a, atomic64_t *v) \
+{ \
+	long t; \
+ \
+	__asm__ __volatile__( \
+"1:	ldarx	%0,0,%3		# atomic64_" #op "\n" \
+	#asm_op " %0,%2,%0\n" \
+"	stdcx.	%0,0,%3 \n" \
+"	bne-	1b\n" \
+	: "=&r" (t), "+m" (v->counter) \
+	: "r" (a), "r" (&v->counter) \
+	: "cc"); \
+}
+
+#define ATOMIC64_OP_RETURN(op, asm_op) \
+static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
+{ \
+	long t; \
+ \
+	__asm__ __volatile__( \
+	PPC_ATOMIC_ENTRY_BARRIER \
+"1:	ldarx	%0,0,%2		# atomic64_" #op "_return\n" \
+	#asm_op " %0,%1,%0\n" \
+"	stdcx.	%0,0,%2 \n" \
+"	bne-	1b\n" \
+	PPC_ATOMIC_EXIT_BARRIER \
+	: "=&r" (t) \
+	: "r" (a), "r" (&v->counter) \
+	: "cc", "memory"); \
+ \
+	return t; \
+}
+
+#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
 	long t;
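
For reference, this is roughly what ATOMIC_OPS(sub, subf) expands to in the 32-bit case once the preprocessor has run (a sketch with simplified whitespace; atomic_t, PPC405_ERR77 and the PPC_ATOMIC_*_BARRIER macros come from the surrounding header). The expansion reproduces the hand-written atomic_sub() and atomic_sub_return() that the patch removes:

/* Sketch: approximate expansion of ATOMIC_OPS(sub, subf).
 * atomic_##op builds the C name (atomic_sub); #asm_op splices the PPC
 * mnemonic (subf) into the asm template.
 */
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n"	/* load-reserve v->counter */
"subf %0,%2,%0\n"				/* t = old value - a       */
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n"			/* store-conditional        */
"	bne-	1b\n"				/* retry if reservation lost */
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_sub_return\n"
"subf %0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}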