Commit 2856f5e3 authored by Mathieu Desnoyers, committed by Linus Torvalds

atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency

atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
I agree with Andi Kleen that this typeof is not needed and is more error
prone. All the original atomic.h code that uses cmpxchg (which includes
atomic_add_unless) uses defines instead of inline functions, probably to
circumvent a circular dependency between system.h and atomic.h on powerpc
(which my patch addresses). It therefore makes sense to use inline
functions, which provide type checking.
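
For illustration only (not part of this patch), a minimal user-space sketch,
with made-up demo_* names, of the type checking the inline form buys over the
__typeof__-based macro:

/* Standalone user-space sketch, illustrative only (demo_* names are not
 * kernel symbols).  Builds with gcc/clang; it uses the same GNU
 * statement-expression and __typeof__ idioms as the old macros. */
#include <stdio.h>

typedef struct { volatile int counter; } demo_atomic_t;
struct not_atomic { long counter; };

/* Macro form: __typeof__ adapts to whatever is passed, so handing it a
 * struct not_atomic * compiles without a single diagnostic. */
#define demo_inc_macro(v)				\
({							\
	__typeof__((v)->counter) __c = (v)->counter;	\
	(v)->counter = __c + 1;				\
	__c;						\
})

/* Inline form: the prototype pins the argument type, so a wrong pointer
 * type draws an incompatible-pointer-type warning from the compiler. */
static inline int demo_inc_inline(demo_atomic_t *v)
{
	int c = v->counter;
	v->counter = c + 1;
	return c;
}

int main(void)
{
	demo_atomic_t a = { 5 };
	struct not_atomic n = { 7 };

	demo_inc_inline(&a);	/* fine: correct type */
	demo_inc_macro(&n);	/* accepted silently despite the wrong type */
	/* demo_inc_inline(&n); would be diagnosed by the compiler */

	printf("%d %ld\n", a.counter, n.counter);	/* prints: 6 8 */
	return 0;
}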

atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
Digging into the FRV architecture shows that it is also affected by such
a circular dependency. Here is the diff applying this change on top of the
rest of my atomic.h patches.
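
Again purely as an illustration (toy_* names are invented, and toy_cmpxchg is
a plain, non-atomic stand-in for the real cmpxchg()), the resulting dependency
is one-way: the system.h side provides the compare-and-exchange primitive and
no longer pulls in atomic.h, while atomic.h builds its inline helpers on top
of it:

/* Toy single-file sketch; in the real tree the first part corresponds to
 * asm/system.h (no longer including asm/atomic.h) and the second part to
 * asm/atomic.h (now including asm/system.h for cmpxchg()). */

/* "system.h" side: provides the compare-and-exchange primitive and
 * depends on nothing from the atomic side. */
static inline int toy_cmpxchg(volatile int *p, int old, int new_val)
{
	int cur = *p;
	if (cur == old)
		*p = new_val;
	return cur;
}

/* "atomic.h" side: one-way dependency on the primitive above. */
typedef struct { volatile int counter; } toy_atomic_t;

static inline int toy_atomic_add_unless(toy_atomic_t *v, int a, int u)
{
	int c = v->counter, old;

	for (;;) {
		if (c == u)
			break;
		old = toy_cmpxchg(&v->counter, c, c + a);
		if (old == c)
			break;
		c = old;
	}
	return c != u;
}

int main(void)
{
	toy_atomic_t v = { 3 };

	/* v already equals u, so nothing is added and 0 is returned */
	return !(toy_atomic_add_unless(&v, 1, 3) == 0);
}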

It applies over the atomic.h standardization patches.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79d365a3
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#define _ALPHA_ATOMIC_H #define _ALPHA_ATOMIC_H
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/system.h>
/* /*
* Atomic operations that C can't guarantee us. Useful for * Atomic operations that C can't guarantee us. Useful for
...@@ -190,20 +191,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) ...@@ -190,20 +191,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__((v)->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic_cmpxchg((v), c, c + (a)); \ old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/** /**
...@@ -215,20 +217,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) ...@@ -215,20 +217,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic64_add_unless(v, a, u) \ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
({ \ {
__typeof__((v)->counter) c, old; \ long c, old;
c = atomic64_read(v); \ c = atomic64_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic64_cmpxchg((v), c, c + (a)); \ old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#define __ASM_ARM_ATOMIC_H #define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t; typedef struct { volatile int counter; } atomic_t;
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#ifndef __ASM_ARM_ATOMIC_H #ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H #define __ASM_ARM_ATOMIC_H
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#error SMP is NOT supported #error SMP is NOT supported
#endif #endif
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/spr-regs.h> #include <asm/spr-regs.h>
#include <asm/system.h>
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#error not SMP safe #error not SMP safe
...@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v); ...@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#define tas(ptr) (xchg((ptr), 1)) #define tas(ptr) (xchg((ptr), 1))
/*****************************************************************************/
/*
* compare and conditionally exchange value with memory
* - if (*ptr == test) then orig = *ptr; *ptr = test;
* - if (*ptr != test) then orig = *ptr;
*/
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define cmpxchg(ptr, test, new) \
({ \
__typeof__(ptr) __xg_ptr = (ptr); \
__typeof__(*(ptr)) __xg_orig, __xg_tmp; \
__typeof__(*(ptr)) __xg_test = (test); \
__typeof__(*(ptr)) __xg_new = (new); \
\
switch (sizeof(__xg_orig)) { \
case 4: \
asm volatile( \
"0: \n" \
" orcc gr0,gr0,gr0,icc3 \n" \
" ckeq icc3,cc7 \n" \
" ld.p %M0,%1 \n" \
" orcr cc7,cc7,cc3 \n" \
" sub%I4cc %1,%4,%2,icc0 \n" \
" bne icc0,#0,1f \n" \
" cst.p %3,%M0 ,cc3,#1 \n" \
" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
" beq icc3,#0,0b \n" \
"1: \n" \
: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
: "r"(__xg_new), "NPr"(__xg_test) \
: "memory", "cc7", "cc3", "icc3", "icc0" \
); \
break; \
\
default: \
__xg_orig = 0; \
asm volatile("break"); \
break; \
} \
\
__xg_orig; \
})
#else
extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#define cmpxchg(ptr, test, new) \
({ \
__typeof__(ptr) __xg_ptr = (ptr); \
__typeof__(*(ptr)) __xg_orig; \
__typeof__(*(ptr)) __xg_test = (test); \
__typeof__(*(ptr)) __xg_new = (new); \
\
switch (sizeof(__xg_orig)) { \
case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
default: \
__xg_orig = 0; \
asm volatile("break"); \
break; \
} \
\
__xg_orig; \
})
#endif
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
int c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#define _ASM_SYSTEM_H #define _ASM_SYSTEM_H
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/atomic.h>
struct thread_struct; struct thread_struct;
...@@ -197,4 +196,73 @@ extern void free_initmem(void); ...@@ -197,4 +196,73 @@ extern void free_initmem(void);
#define arch_align_stack(x) (x) #define arch_align_stack(x) (x)
/*****************************************************************************/
/*
* compare and conditionally exchange value with memory
* - if (*ptr == test) then orig = *ptr; *ptr = test;
* - if (*ptr != test) then orig = *ptr;
*/
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define cmpxchg(ptr, test, new) \
({ \
__typeof__(ptr) __xg_ptr = (ptr); \
__typeof__(*(ptr)) __xg_orig, __xg_tmp; \
__typeof__(*(ptr)) __xg_test = (test); \
__typeof__(*(ptr)) __xg_new = (new); \
\
switch (sizeof(__xg_orig)) { \
case 4: \
asm volatile( \
"0: \n" \
" orcc gr0,gr0,gr0,icc3 \n" \
" ckeq icc3,cc7 \n" \
" ld.p %M0,%1 \n" \
" orcr cc7,cc7,cc3 \n" \
" sub%I4cc %1,%4,%2,icc0 \n" \
" bne icc0,#0,1f \n" \
" cst.p %3,%M0 ,cc3,#1 \n" \
" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
" beq icc3,#0,0b \n" \
"1: \n" \
: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
: "r"(__xg_new), "NPr"(__xg_test) \
: "memory", "cc7", "cc3", "icc3", "icc0" \
); \
break; \
\
default: \
__xg_orig = 0; \
asm volatile("break"); \
break; \
} \
\
__xg_orig; \
})
#else
extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#define cmpxchg(ptr, test, new) \
({ \
__typeof__(ptr) __xg_ptr = (ptr); \
__typeof__(*(ptr)) __xg_orig; \
__typeof__(*(ptr)) __xg_test = (test); \
__typeof__(*(ptr)) __xg_new = (new); \
\
switch (sizeof(__xg_orig)) { \
case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
default: \
__xg_orig = 0; \
asm volatile("break"); \
break; \
} \
\
__xg_orig; \
})
#endif
#endif /* _ASM_SYSTEM_H */ #endif /* _ASM_SYSTEM_H */
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
*/ */
#include <asm/types.h> #include <asm/types.h>
#include <asm/system.h>
/* /*
* Suppport for atomic_long_t * Suppport for atomic_long_t
...@@ -123,8 +122,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l) ...@@ -123,8 +122,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
return (long)atomic64_dec_return(v); return (long)atomic64_dec_return(v);
} }
#define atomic_long_add_unless(l, a, u) \ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
atomic64_add_unless((atomic64_t *)(l), (a), (u)) {
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
...@@ -236,8 +239,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l) ...@@ -236,8 +239,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
return (long)atomic_dec_return(v); return (long)atomic_dec_return(v);
} }
#define atomic_long_add_unless(l, a, u) \ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
atomic_add_unless((atomic_t *)(l), (a), (u)) {
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_unless(v, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
......
...@@ -219,20 +219,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) ...@@ -219,20 +219,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u. * Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__((v)->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic_cmpxchg((v), c, c + (a)); \ old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_inc_return(v) (atomic_add_return(1,v))
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/intrinsics.h> #include <asm/intrinsics.h>
#include <asm/system.h>
/* /*
* On IA-64, counter must always be volatile to ensure that that the * On IA-64, counter must always be volatile to ensure that that the
...@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) ...@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
(cmpxchg(&((v)->counter), old, new)) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__(v->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic_cmpxchg((v), c, c + (a)); \ old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic64_add_unless(v, a, u) \ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
({ \ {
__typeof__(v->counter) c, old; \ long c, old;
c = atomic64_read(v); \ c = atomic64_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic64_cmpxchg((v), c, c + (a)); \ old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_add_return(i,v) \ #define atomic_add_return(i,v) \
......
...@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v) ...@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
int c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define __ARCH_M68K_ATOMIC__ #define __ARCH_M68K_ATOMIC__
#include <asm/system.h> /* local_irq_XXX() */ #include <asm/system.h>
/* /*
* Atomic operations that C can't guarantee us. Useful for * Atomic operations that C can't guarantee us. Useful for
...@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) ...@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask)); __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
} }
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
int c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic_cmpxchg((v), c, c + (a)); \ old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing */ /* Atomic operations are already serializing */
......
#ifndef __ARCH_M68KNOMMU_ATOMIC__ #ifndef __ARCH_M68KNOMMU_ATOMIC__
#define __ARCH_M68KNOMMU_ATOMIC__ #define __ARCH_M68KNOMMU_ATOMIC__
#include <asm/system.h> /* local_irq_XXX() */ #include <asm/system.h>
/* /*
* Atomic operations that C can't guarantee us. Useful for * Atomic operations that C can't guarantee us. Useful for
...@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v) ...@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
int c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_dec_return(v) atomic_sub_return(1,(v))
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/war.h> #include <asm/war.h>
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t; typedef struct { volatile int counter; } atomic_t;
...@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) ...@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__((v)->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_dec_return(v) atomic_sub_return(1,(v))
...@@ -694,14 +701,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) ...@@ -694,14 +701,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic64_add_unless(v, a, u) \ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
({ \ {
__typeof__((v)->counter) c, old; \ long c, old;
c = atomic_read(v); \ c = atomic64_read(v);
while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define _ASM_PARISC_ATOMIC_H_ #define _ASM_PARISC_ATOMIC_H_
#include <linux/types.h> #include <linux/types.h>
#include <asm/system.h>
/* /*
* Atomic operations that C can't guarantee us. Useful for * Atomic operations that C can't guarantee us. Useful for
...@@ -174,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v) ...@@ -174,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__((v)->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
...@@ -283,14 +291,21 @@ atomic64_read(const atomic64_t *v) ...@@ -283,14 +291,21 @@ atomic64_read(const atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic64_add_unless(v, a, u) \ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
({ \ {
__typeof__((v)->counter) c, old; \ long c, old;
c = atomic64_read(v); \ c = atomic64_read(v);
while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
......
...@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t; ...@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t;
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/synch.h> #include <asm/synch.h>
#include <asm/asm-compat.h> #include <asm/asm-compat.h>
#include <asm/system.h>
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <asm/atomic.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
/* /*
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#define __ARCH_SPARC64_ATOMIC__ #define __ARCH_SPARC64_ATOMIC__
#include <linux/types.h> #include <linux/types.h>
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t; typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t; typedef struct { volatile __s64 counter; } atomic64_t;
...@@ -73,40 +74,42 @@ extern int atomic64_sub_ret(int, atomic64_t *); ...@@ -73,40 +74,42 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__((v)->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic_cmpxchg((v), c, c + (a)); \ old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
likely(c != (u)); \ return c != (u);
}) }
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic64_cmpxchg(v, o, n) \ #define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_add_unless(v, a, u) \ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
({ \ {
__typeof__((v)->counter) c, old; \ long c, old;
c = atomic64_read(v); \ c = atomic64_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic64_cmpxchg((v), c, c + (a)); \ old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
likely(c != (u)); \ return c != (u);
}) }
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic operations are already serializing */ /* Atomic operations are already serializing */
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#define __ARCH_X86_64_ATOMIC__ #define __ARCH_X86_64_ATOMIC__
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/system.h>
/* atomic_t should be 32 bit signed type */ /* atomic_t should be 32 bit signed type */
...@@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) ...@@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
__typeof__((v)->counter) c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic_cmpxchg((v), c, c + (a)); \ old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/** /**
...@@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) ...@@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic64_add_unless(v, a, u) \ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
({ \ {
__typeof__((v)->counter) c, old; \ long c, old;
c = atomic64_read(v); \ c = atomic64_read(v);
for (;;) { \ for (;;) {
if (unlikely(c == (u))) \ if (unlikely(c == (u)))
break; \ break;
old = atomic64_cmpxchg((v), c, c + (a)); \ old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c)) \ if (likely(old == c))
break; \ break;
c = old; \ c = old;
} \ }
c != (u); \ return c != (u);
}) }
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* These are x86-specific, used by some header files */ /* These are x86-specific, used by some header files */
......
...@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v) ...@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise. * Returns non-zero if @v was not @u, and zero otherwise.
*/ */
#define atomic_add_unless(v, a, u) \ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
({ \ {
int c, old; \ int c, old;
c = atomic_read(v); \ c = atomic_read(v);
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ for (;;) {
c = old; \ if (unlikely(c == (u)))
c != (u); \ break;
}) old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
......