Commit 7edb3cd6 authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/atomic_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 78ff12ee
@@ -22,7 +22,9 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { int counter; } atomic_t;
+typedef struct {
+	int counter;
+} atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
@@ -41,7 +43,7 @@ typedef struct { int counter; } atomic_t;
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)		(((v)->counter) = (i))
+#define atomic_set(v, i)	(((v)->counter) = (i))
 
 /**
  * atomic_add - add integer to atomic variable
@@ -50,12 +52,11 @@ typedef struct { int counter; } atomic_t;
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -65,12 +66,11 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -82,14 +82,13 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -99,12 +98,11 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-static __inline__ void atomic_inc(atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -113,12 +111,11 @@ static __inline__ void atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static __inline__ void atomic_dec(atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -129,14 +126,13 @@ static __inline__ void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -148,14 +144,13 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+static inline int atomic_inc_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "incl %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -168,14 +163,13 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
  *
 * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
+	asm volatile(LOCK_PREFIX "xaddl %0, %1"
+		     : "+r" (i), "+m" (v->counter)
 		     : : "memory");
 	return i + __i;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	return atomic_add_return(-i,v);
+	return atomic_add_return(-i, v);
 }
 
-#define atomic_inc_return(v)  (atomic_add_return(1,v))
-#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+#define atomic_inc_return(v)  (atomic_add_return(1, v))
+#define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
 /* An 64bit atomic type */
 
-typedef struct { long counter; } atomic64_t;
+typedef struct {
+	long counter;
+} atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
@@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t;
  *
 * Atomically sets the value of @v to @i.
  */
-#define atomic64_set(v,i)		(((v)->counter) = (i))
+#define atomic64_set(v, i)	(((v)->counter) = (i))
 
 /**
  * atomic64_add - add integer to atomic64 variable
@@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t;
  *
 * Atomically adds @i to @v.
  */
-static __inline__ void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "addq %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
  *
 * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
+static inline void atomic64_sub(long i, atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "subq %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
 * other cases.
  */
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
  *
 * Atomically increments @v by 1.
  */
-static __inline__ void atomic64_inc(atomic64_t *v)
+static inline void atomic64_inc(atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "incq %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v)
  *
 * Atomically decrements @v by 1.
  */
-static __inline__ void atomic64_dec(atomic64_t *v)
+static inline void atomic64_dec(atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "decq %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
 * cases.
  */
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+static inline int atomic64_dec_and_test(atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "decq %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
 * other cases.
  */
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+static inline int atomic64_inc_and_test(atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "incq %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
}
 
@@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
 * result is greater than or equal to zero.
  */
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
  *
 * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
 	long __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddq %0, %1;"
-		:"+r" (i), "+m" (v->counter)
+	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
+		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
 	return i + __i;
 }
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+static inline long atomic64_sub_return(long i, atomic64_t *v)
 {
-	return atomic64_add_return(-i,v);
+	return atomic64_add_return(-i, v);
 }
 
-#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
-#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
+#define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
  * atomic_add_unless - add unless the number is a given value
@@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -449,12 +435,13 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
+	asm volatile(LOCK_PREFIX "andl %0,%1" \
+		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+	asm volatile(LOCK_PREFIX "orl %0,%1" \
+		     : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+		     : "memory")
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
...
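For reference, a minimal user-space sketch (not part of the commit) of what the reformatted style looks like in isolation. It mirrors atomic_add() and atomic_sub_and_test() from this header, but LOCK_PREFIX is hard-coded to a plain "lock; " prefix and the trivial main() is an assumption of this sketch, not kernel code:

/* Standalone approximation, assuming GCC on x86-64. */
#include <stdio.h>

typedef struct {
	int counter;
} atomic_t;

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

/* Atomically adds @i to @v, as in the header but with a fixed lock prefix. */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile("lock; addl %1,%0"
		     : "=m" (v->counter)
		     : "ir" (i), "m" (v->counter));
}

/* Atomically subtracts @i from @v and returns true if the result is zero. */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile("lock; subl %2,%0; sete %1"
		     : "=m" (v->counter), "=qm" (c)
		     : "ir" (i), "m" (v->counter) : "memory");
	return c;
}

int main(void)
{
	atomic_t v = ATOMIC_INIT(3);

	atomic_add(2, &v);				/* counter: 3 -> 5 */
	printf("after add: %d\n", atomic_read(&v));	/* prints 5 */
	printf("sub_and_test(5): %d\n",
	       atomic_sub_and_test(5, &v));		/* prints 1, counter now 0 */
	return 0;
}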