Commit 135247bf authored by Russell King's avatar Russell King

[ARM] Update atomic.h

This re-jigs atomic.h by providing atomic_add_return and
atomic_sub_return as other architectures do.  This allows us to
implement the atomic ops that test the new value without having
to write the underlying atomic operation in various forms.
parent 1f20fe30
......@@ -59,50 +59,47 @@ static inline void atomic_add(int i, atomic_t *v)
: "cc");
}
/*
 * atomic_add_return - atomically add @i to @v and return the new value.
 *
 * LL/SC retry loop: ldrex takes an exclusive load of the counter, the
 * add is applied, and strex attempts the store.  A non-zero strex
 * status in %1 means the exclusive reservation was lost (another agent
 * touched the location), so the whole sequence is retried from label 1.
 * "Ir" lets @i be either an immediate or a register operand; "cc" is
 * clobbered by teq.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}
/*
 * atomic_sub - atomically subtract @i from @v; no return value.
 *
 * Same ldrex/strex retry loop as the other ops; since the caller does
 * not need the result, both scratch registers are plain temporaries.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
/*
 * atomic_sub_return - atomically subtract @i from @v and return the
 * new value.
 *
 * Mirror image of atomic_add_return: ldrex/sub/strex with a retry on a
 * lost exclusive reservation.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
......@@ -147,57 +144,39 @@ static inline void atomic_add(int i, atomic_t *v)
local_irq_restore(flags);
}
/*
 * atomic_add_return - add @i to @v and return the new value.
 *
 * NOTE(review): this variant appears to be the fallback for cores
 * without ldrex/strex (the section is closed by the
 * "#endif __LINUX_ARM_ARCH__" below) — atomicity is obtained by
 * disabling local interrupts around the read-modify-write.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	local_irq_restore(flags);

	return val;
}
/*
 * atomic_sub - subtract @i from @v; no return value.
 *
 * Interrupt-disable variant: the decrement of the counter is made
 * atomic with respect to local interrupt handlers by bracketing it
 * with local_irq_save()/local_irq_restore().
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
}
/*
 * atomic_sub_return - subtract @i from @v and return the new value.
 *
 * Interrupt-disable variant; @val captures the updated counter inside
 * the critical section so the returned value is the one this caller
 * produced.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	local_irq_restore(flags);

	return val;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
......@@ -211,6 +190,14 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#endif /* __LINUX_ARM_ARCH__ */
/* inc/dec are thin wrappers over the basic add/sub operations. */
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
/* Value-testing ops are derived from the *_return primitives, so each
 * architecture variant only has to implement add/sub(_return). */
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment