Commit e52da357 authored by Joe Perches's avatar Joe Perches Committed by Ingo Molnar

include/asm-x86/cmpxchg_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8121019c
...@@ -3,7 +3,8 @@ ...@@ -3,7 +3,8 @@
#include <asm/alternative.h> /* Provides LOCK_PREFIX */ #include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
 * xchg(ptr, v): atomically exchange *ptr with v and return the previous
 * value of *ptr.  The outer cast restores the pointed-to type; __xchg()
 * selects the right instruction from the operand size.
 */
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
						 (ptr), sizeof(*(ptr))))

/* View an arbitrary pointer as a volatile long * for asm memory operands. */
#define __xg(x) ((volatile long *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 *
 * The __asm__/__volatile__ spellings are used so this also compiles under
 * strict -std= modes, where the bare "asm" keyword is not available.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
				     : "=q" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
				     : "=r" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
				     : "=r" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
				     : "=r" (x)
				     : "m" (*__xg(ptr)), "0" (x)
				     : "memory");
		break;
	}
	/* Unknown sizes fall through and simply return x unchanged. */
	return x;
}
...@@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long prev; unsigned long prev;
switch (size) { switch (size) {
case 1: case 1:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
: "=a"(prev) : "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old) : "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory"); : "memory");
return prev; return prev;
case 2: case 2:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
: "=a"(prev) : "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old) : "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory"); : "memory");
return prev; return prev;
case 4: case 4:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
: "=a"(prev) : "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old) : "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory"); : "memory");
return prev; return prev;
case 8: case 8:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
: "=a"(prev) : "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old) : "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory"); : "memory");
return prev; return prev;
} }
return old; return old;
} }
/*
 * CPU-local compare-and-exchange: identical contract to __cmpxchg() but
 * without the lock prefix, so it is not SMP-safe and is intended for data
 * only ever touched by the local CPU (e.g. per-cpu variables).
 *
 * The (volatile long *) cast on ptr is the inlined equivalent of the
 * file's __xg() helper, used to form the asm memory operand.
 */
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;

	switch (size) {
	case 1:
		__asm__ __volatile__("cmpxchgb %b1,%2"
				     : "=a" (prev)
				     : "q" (new),
				       "m" (*(volatile long *)ptr),
				       "0" (old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("cmpxchgw %w1,%2"
				     : "=a" (prev)
				     : "r" (new),
				       "m" (*(volatile long *)ptr),
				       "0" (old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("cmpxchgl %k1,%2"
				     : "=a" (prev)
				     : "r" (new),
				       "m" (*(volatile long *)ptr),
				       "0" (old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__("cmpxchgq %1,%2"
				     : "=a" (prev)
				     : "r" (new),
				       "m" (*(volatile long *)ptr),
				       "0" (old)
				     : "memory");
		return prev;
	}
	/* Unsupported size: no exchange attempted, report "success". */
	return old;
}
...@@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ...@@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
/*
 * Type-preserving front-end macros.  The 64-bit variants additionally
 * enforce at build time (via BUILD_BUG_ON) that the operand really is
 * 8 bytes wide before delegating to the generic version.
 */
#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})

#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.