Commit ade5ef92 authored by Mark Rutland, committed by Ingo Molnar

atomics: Make conditional ops return 'bool'

Some of the atomics return a status value, which is a boolean describing
whether the operation was performed. To make it clear that this is a
boolean, let's update the common fallbacks to return bool, fixing up the
return values and comments likewise.

At the same time, let's simplify the description of the operations in
their respective comments.

The instrumented atomics and generic atomic64 implementation are updated
accordingly.

Note that atomic64_dec_if_positive() doesn't follow the usual test op
pattern, and returns the would-be decremented value. This is not
changed.
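
For illustration, the pattern these fallbacks implement can be sketched in
plain C11 (a hypothetical userspace analogue using <stdatomic.h>, not the
kernel API): add @a to the counter unless it currently holds @u, and return
a bool saying whether the add was done.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Add @a to @v unless @v is @u; return true iff the add was done. */
	static bool add_unless(atomic_int *v, int a, int u)
	{
		int old = atomic_load(v);

		while (old != u) {
			/* On failure, 'old' is refreshed with the current value. */
			if (atomic_compare_exchange_weak(v, &old, old + a))
				return true;
		}
		return false;
	}

	int main(void)
	{
		atomic_int v = 0;

		printf("%d\n", add_unless(&v, 1, 0));	/* 0: v was 0, no add */
		atomic_store(&v, 5);
		printf("%d\n", add_unless(&v, 1, 0));	/* 1: v is now 6 */
		return 0;
	}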
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-5-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f74445b6
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -205,7 +205,7 @@ static __always_inline s64 atomic64_dec_return(atomic64_t *v)
 	return arch_atomic64_dec_return(v);
 }
 
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_inc_not_zero(v);
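
The hunk above shows the instrumented-atomics layering: a generic inline
wrapper performs the KASAN check, then defers to the arch_-prefixed
implementation. A standalone sketch of that layering, with the check
stubbed out and the arch op emulated via C11 atomics (all names here are
illustrative, not the kernel's):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Stub standing in for the kernel's kasan_check_write(). */
	static void check_write(const volatile void *addr, unsigned int size)
	{
		(void)addr;
		(void)size;
	}

	/* Stand-in for the arch-provided op, emulated with C11 atomics. */
	static bool arch_inc_not_zero_emul(atomic_llong *v)
	{
		long long old = atomic_load(v);

		while (old != 0) {
			if (atomic_compare_exchange_weak(v, &old, old + 1))
				return true;
		}
		return false;
	}

	/* Instrumented wrapper: check the access, then defer to the "arch" op. */
	static inline bool inc_not_zero(atomic_llong *v)
	{
		check_write(v, sizeof(*v));
		return arch_inc_not_zero_emul(v);
	}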
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
  */
 #ifndef _ASM_GENERIC_ATOMIC64_H
 #define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
 
 typedef struct {
 	long long counter;
@@ -52,7 +53,7 @@ ATOMIC64_OPS(xor)
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+extern bool atomic64_add_unless(atomic64_t *v, long long a, long long u);
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
 /* Atomic operations usable in machine independent code */
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
 #include <asm/atomic.h>
 #include <asm/barrier.h>
@@ -525,10 +527,10 @@
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 {
 	return atomic_fetch_add_unless(v, a, u) != u;
 }
@@ -537,8 +539,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
  */
 #ifndef atomic_inc_not_zero
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
@@ -572,28 +574,28 @@ static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
 #endif
 
 #ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *p)
 {
 	int v, v1;
 	for (v = 0; v >= 0; v = v1) {
 		v1 = atomic_cmpxchg(p, v, v + 1);
 		if (likely(v1 == v))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
 #endif
 
 #ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *p)
 {
 	int v, v1;
 	for (v = 0; v <= 0; v = v1) {
 		v1 = atomic_cmpxchg(p, v, v - 1);
 		if (likely(v1 == v))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
 #endif
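
The atomic_inc_unless_negative() fallback above uses the classic cmpxchg
loop: re-read the counter after every failed exchange and give up once the
observed value goes negative. The same loop in C11 form, where
compare_exchange folds the kernel's v/v1 pair into a single variable (a
sketch, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* C11 analogue of the atomic_inc_unless_negative() fallback. */
	static bool inc_unless_negative(atomic_int *p)
	{
		int v = atomic_load(p);

		/* Keep trying while the observed value is non-negative. */
		while (v >= 0) {
			/* On failure, 'v' is refreshed with the current value. */
			if (atomic_compare_exchange_weak(p, &v, v + 1))
				return true;
		}
		return false;
	}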
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,16 +178,16 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	int ret = 0;
+	bool ret = false;
 
 	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
-		ret = 1;
+		ret = true;
 	}
 	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
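
lib/atomic64.c is the generic fallback for machines without native 64-bit
atomics: each operation takes a spinlock (hashed from the counter's address
via lock_addr()) around a plain 64-bit update. A userspace analogue of the
patched atomic64_add_unless(), with a pthread mutex standing in for the
hashed spinlock (illustrative only):

	#include <pthread.h>
	#include <stdbool.h>

	struct locked64 {
		pthread_mutex_t lock;	/* stands in for the hashed raw spinlock */
		long long counter;
	};

	/* Lock-based add_unless: add @a unless the counter is @u. */
	static bool locked64_add_unless(struct locked64 *v, long long a, long long u)
	{
		bool ret = false;

		pthread_mutex_lock(&v->lock);
		if (v->counter != u) {
			v->counter += a;
			ret = true;
		}
		pthread_mutex_unlock(&v->lock);
		return ret;
	}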