Commit c9ebe21b authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,m32r: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-m32r-ja@ml.linux-m32r.org
Cc: linux-m32r@ml.linux-m32r.org
Link: http://lkml.kernel.org/r/20140508135852.318635136@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 08be2dab
...@@ -39,85 +39,64 @@ ...@@ -39,85 +39,64 @@
*/ */
#define atomic_set(v,i) (((v)->counter) = (i)) #define atomic_set(v,i) (((v)->counter) = (i))
/**
* atomic_add_return - add integer to atomic variable and return it
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and return (@i + @v).
*/
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
int result;
local_irq_save(flags);
__asm__ __volatile__ (
"# atomic_add_return \n\t"
DCACHE_CLEAR("%0", "r4", "%1")
M32R_LOCK" %0, @%1; \n\t"
"add %0, %2; \n\t"
M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (result)
: "r" (&v->counter), "r" (i)
: "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r4" #define __ATOMIC_CLOBBER , "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */ #else
); #define __ATOMIC_CLOBBER
local_irq_restore(flags); #endif
return result; #define ATOMIC_OP(op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int result; \
\
local_irq_save(flags); \
__asm__ __volatile__ ( \
"# atomic_" #op " \n\t" \
DCACHE_CLEAR("%0", "r4", "%1") \
M32R_LOCK" %0, @%1; \n\t" \
#op " %0, %2; \n\t" \
M32R_UNLOCK" %0, @%1; \n\t" \
: "=&r" (result) \
: "r" (&v->counter), "r" (i) \
: "memory" \
__ATOMIC_CLOBBER \
); \
local_irq_restore(flags); \
} \
#define ATOMIC_OP_RETURN(op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int result; \
\
local_irq_save(flags); \
__asm__ __volatile__ ( \
"# atomic_" #op "_return \n\t" \
DCACHE_CLEAR("%0", "r4", "%1") \
M32R_LOCK" %0, @%1; \n\t" \
#op " %0, %2; \n\t" \
M32R_UNLOCK" %0, @%1; \n\t" \
: "=&r" (result) \
: "r" (&v->counter), "r" (i) \
: "memory" \
__ATOMIC_CLOBBER \
); \
local_irq_restore(flags); \
\
return result; \
} }
/** #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
* atomic_sub_return - subtract integer from atomic variable and return it
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and return (@v - @i).
*/
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
int result;
local_irq_save(flags);
__asm__ __volatile__ (
"# atomic_sub_return \n\t"
DCACHE_CLEAR("%0", "r4", "%1")
M32R_LOCK" %0, @%1; \n\t"
"sub %0, %2; \n\t"
M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (result)
: "r" (&v->counter), "r" (i)
: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
);
local_irq_restore(flags);
return result;
}
/** ATOMIC_OPS(add)
* atomic_add - add integer to atomic variable ATOMIC_OPS(sub)
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
#define atomic_add(i,v) ((void) atomic_add_return((i), (v)))
/** #undef ATOMIC_OPS
* atomic_sub - subtract the atomic variable #undef ATOMIC_OP_RETURN
* @i: integer value to subtract #undef ATOMIC_OP
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
#define atomic_sub(i,v) ((void) atomic_sub_return((i), (v)))
/** /**
* atomic_sub_and_test - subtract value from variable and test result * atomic_sub_and_test - subtract value from variable and test result
...@@ -151,9 +130,7 @@ static __inline__ int atomic_inc_return(atomic_t *v) ...@@ -151,9 +130,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
: "=&r" (result) : "=&r" (result)
: "r" (&v->counter) : "r" (&v->counter)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
...@@ -181,9 +158,7 @@ static __inline__ int atomic_dec_return(atomic_t *v) ...@@ -181,9 +158,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
: "=&r" (result) : "=&r" (result)
: "r" (&v->counter) : "r" (&v->counter)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
...@@ -280,9 +255,7 @@ static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) ...@@ -280,9 +255,7 @@ static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
: "=&r" (tmp) : "=&r" (tmp)
: "r" (addr), "r" (~mask) : "r" (addr), "r" (~mask)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r5"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -302,9 +275,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) ...@@ -302,9 +275,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
: "=&r" (tmp) : "=&r" (tmp)
: "r" (addr), "r" (mask) : "r" (addr), "r" (mask)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r5"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment