Commit d4608dd5 authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,xtensa: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-xtensa@linux-xtensa.org
Link: http://lkml.kernel.org/r/20140508135852.879575796@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4f3316c2
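
For orientation, here is roughly what the folded macros in the diff below generate once the preprocessor has run. The sketch shows ATOMIC_OP_RETURN(add) on an S32C1I-capable core; it is illustrative only (whitespace differs from real preprocessor output) and is not part of the patch itself:

static inline int atomic_add_return(int i, atomic_t * v)
{
        unsigned long tmp;
        int result;

        /* "#op" has been pasted into the instruction template as "add" */
        __asm__ __volatile__(
                        "1: l32i %1, %3, 0\n"
                        " wsr %1, scompare1\n"
                        " add %0, %1, %2\n"
                        " s32c1i %0, %3, 0\n"
                        " bne %0, %1, 1b\n"
                        " add %0, %0, %2\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (i), "a" (v)
                        : "memory"
                        );

        return result;
}

The only per-operation differences are the mnemonic spliced in via #op and, for the _return variant, the extra #op instruction that recomputes the returned value after a successful s32c1i.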
...
@@ -58,165 +58,96 @@
  */
 #define atomic_set(v,i)  ((v)->counter = (i))

-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
 #if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                        "1: l32i %1, %3, 0\n"
-                        " wsr %1, scompare1\n"
-                        " add %0, %1, %2\n"
-                        " s32c1i %0, %3, 0\n"
-                        " bne %0, %1, 1b\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                        " rsil a15, "__stringify(LOCKLEVEL)"\n"
-                        " l32i %0, %2, 0\n"
-                        " add %0, %0, %1\n"
-                        " s32i %0, %2, 0\n"
-                        " wsr a15, ps\n"
-                        " rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-#endif
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-        __asm__ __volatile__(
-                        "1: l32i %1, %3, 0\n"
-                        " wsr %1, scompare1\n"
-                        " sub %0, %1, %2\n"
-                        " s32c1i %0, %3, 0\n"
-                        " bne %0, %1, 1b\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-#else
-        unsigned int vval;
-        __asm__ __volatile__(
-                        " rsil a15, "__stringify(LOCKLEVEL)"\n"
-                        " l32i %0, %2, 0\n"
-                        " sub %0, %0, %1\n"
-                        " s32i %0, %2, 0\n"
-                        " wsr a15, ps\n"
-                        " rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-#endif
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                        "1: l32i %1, %3, 0\n" \
+                        " wsr %1, scompare1\n" \
+                        " " #op " %0, %1, %2\n" \
+                        " s32c1i %0, %3, 0\n" \
+                        " bne %0, %1, 1b\n" \
+                        : "=&a" (result), "=&a" (tmp) \
+                        : "a" (i), "a" (v) \
+                        : "memory" \
+                        ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+        unsigned long tmp; \
+        int result; \
+ \
+        __asm__ __volatile__( \
+                        "1: l32i %1, %3, 0\n" \
+                        " wsr %1, scompare1\n" \
+                        " " #op " %0, %1, %2\n" \
+                        " s32c1i %0, %3, 0\n" \
+                        " bne %0, %1, 1b\n" \
+                        " " #op " %0, %0, %2\n" \
+                        : "=&a" (result), "=&a" (tmp) \
+                        : "a" (i), "a" (v) \
+                        : "memory" \
+                        ); \
+ \
+        return result; \
 }

-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                        "1: l32i %1, %3, 0\n"
-                        " wsr %1, scompare1\n"
-                        " add %0, %1, %2\n"
-                        " s32c1i %0, %3, 0\n"
-                        " bne %0, %1, 1b\n"
-                        " add %0, %0, %2\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-
-        return result;
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                        " rsil a15,"__stringify(LOCKLEVEL)"\n"
-                        " l32i %0, %2, 0\n"
-                        " add %0, %0, %1\n"
-                        " s32i %0, %2, 0\n"
-                        " wsr a15, ps\n"
-                        " rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-
-        return vval;
-#endif
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+        unsigned int vval; \
+ \
+        __asm__ __volatile__( \
+                        " rsil a15, "__stringify(LOCKLEVEL)"\n" \
+                        " l32i %0, %2, 0\n" \
+                        " " #op " %0, %0, %1\n" \
+                        " s32i %0, %2, 0\n" \
+                        " wsr a15, ps\n" \
+                        " rsync\n" \
+                        : "=&a" (vval) \
+                        : "a" (i), "a" (v) \
+                        : "a15", "memory" \
+                        ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+        unsigned int vval; \
+ \
+        __asm__ __volatile__( \
+                        " rsil a15,"__stringify(LOCKLEVEL)"\n" \
+                        " l32i %0, %2, 0\n" \
+                        " " #op " %0, %0, %1\n" \
+                        " s32i %0, %2, 0\n" \
+                        " wsr a15, ps\n" \
+                        " rsync\n" \
+                        : "=&a" (vval) \
+                        : "a" (i), "a" (v) \
+                        : "a15", "memory" \
+                        ); \
+ \
+        return vval; \
 }

-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-        __asm__ __volatile__(
-                        "1: l32i %1, %3, 0\n"
-                        " wsr %1, scompare1\n"
-                        " sub %0, %1, %2\n"
-                        " s32c1i %0, %3, 0\n"
-                        " bne %0, %1, 1b\n"
-                        " sub %0, %0, %2\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-
-        return result;
-#else
-        unsigned int vval;
-        __asm__ __volatile__(
-                        " rsil a15,"__stringify(LOCKLEVEL)"\n"
-                        " l32i %0, %2, 0\n"
-                        " sub %0, %0, %1\n"
-                        " s32i %0, %2, 0\n"
-                        " wsr a15, ps\n"
-                        " rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-
-        return vval;
-#endif
-}
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP

 /**
  * atomic_sub_and_test - subtract value from variable and test result
...
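
The "easy addition of new ops" mentioned in the changelog follows from the same token pasting: a hypothetical later patch (not part of this commit) would only need one more ATOMIC_OPS() line next to the existing invocations, before the #undefs, for any suitable Xtensa integer instruction, e.g.:

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OPS(and)         /* hypothetical: would generate atomic_and() and atomic_and_return() */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP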