Commit b924a690 authored by Chen Gang, committed by Chris Metcalf

tile: include: asm: use 'long long' instead of 'u64' for atomic64_t and its related functions

The atomic* values are signed, and the atomic* functions must also
handle signed values (both parameters and return values), so use
'long long' instead of 'u64'.

The replacement also fixes a bug in atomic64_add_negative(): a u64 is
never less than 0, so the sign test could never succeed.

The modifications are:

  in vim, use the "1,% s/\<u64\>/long long/g" command.
  remove the redundant '__aligned(8)'.
  re-check the 80-column limit (and '\' continuation alignment in
  macros) after the replacement.
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com> [re-instated const cast]
parent 4a10c2ac
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
         return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+                                         long long n)
 {
         return cmpxchg64(&v->counter, o, n);
 }
...
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 
 /* A 64bit atomic type */
 typedef struct {
-        u64 __aligned(8) counter;
+        long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
         /*
          * Requires an atomic op to read both 32-bit parts consistently.
          * Casting away const is safe since the atomic support routines
          * do not write to memory if the value has not been modified.
          */
-        return _atomic64_xchg_add((u64 *)&v->counter, 0);
+        return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
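
The comment above motivates the const cast; an atomic op is needed in
the first place because a plain 64-bit load on 32-bit tile is two
32-bit loads.  A hypothetical broken reader, for illustration only
(assumes little-endian layout):

  /* BROKEN: an update on another CPU can land between the two halves,
   * so the caller may see a value that was never actually stored. */
  static long long torn_atomic64_read(const atomic64_t *v)
  {
          const unsigned int *p = (const unsigned int *)&v->counter;
          unsigned int lo = p[0];         /* update may land here... */
          unsigned int hi = p[1];         /* ...leaving hi stale vs. lo */
          return ((long long)hi << 32) | lo;
  }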
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
         _atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
         smp_mb();  /* barrier for proper semantics */
         return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+                                            long long u)
 {
         smp_mb();  /* barrier for proper semantics */
         return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
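
A usage sketch for the semantics documented above (a hypothetical
helper, not from this patch): the "!= u" return value makes this the
natural building block for "take a reference unless already dead":

  /* Hypothetical refcount-style helper: add one reference unless the
   * count has already dropped to zero; non-zero return means success. */
  static inline int get_ref64(atomic64_t *refs)
  {
          return atomic64_add_unless(refs, 1, 0);
  }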
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
         _atomic64_xchg(&v->counter, n);
 }
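
The comment above is the key constraint; schematically, a raw store
could be lost against the lock-based read-modify-write routines
(an illustration, not code from this patch):

  /*
   *   CPU 0: atomic64_add(i, v)        CPU 1: raw 64-bit store
   *   -------------------------        -----------------------
   *   tmp = v->counter;   (load)
   *                                    v->counter = n;
   *   v->counter = tmp+i; (store)      <-- n silently overwritten
   *
   * Routing atomic64_set() through _atomic64_xchg() serializes it
   * against the load/store pairs of the other atomic ops.
   */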
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-                                      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+                                    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+                                     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+                                            int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
...
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n) \
 ({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
         if (sizeof(*(ptr)) != 4) \
                 __cmpxchg_called_with_bad_pointer(); \
         smp_mb(); \
-        (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+        (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+                                        (int)n); \
 })
 
 #define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
         if (sizeof(*(ptr)) != 8) \
                 __xchg_called_with_bad_pointer(); \
         smp_mb(); \
-        (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+        (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+                                       (long long)(n)); \
 })
 
 #define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
         if (sizeof(*(ptr)) != 8) \
                 __cmpxchg_called_with_bad_pointer(); \
         smp_mb(); \
-        (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+        (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+                                          (long long)o, (long long)n); \
 })
 
 #else
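
For illustration, a hypothetical caller of the cmpxchg64() macro
defined above; the initial read may be torn on 32-bit, but the loop
self-corrects because cmpxchg64() returns the value actually found:

  /* Hypothetical lock-free running maximum over a plain 64-bit word. */
  static void update_max64(long long *max, long long sample)
  {
          long long old = *max;
          while (sample > old) {
                  long long prev = cmpxchg64(max, old, sample);
                  if (prev == old)
                          break;          /* our store won */
                  old = prev;             /* lost the race; retry */
          }
  }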
@@ -81,7 +84,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
         switch (sizeof(*(ptr))) { \
         case 4: \
                 __x = (typeof(__x))(unsigned long) \
-                        __insn_exch4((ptr), (u32)(unsigned long)(n)); \
+                        __insn_exch4((ptr), \
+                                (u32)(unsigned long)(n)); \
                 break; \
         case 8: \
                 __x = (typeof(__x)) \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
         switch (sizeof(*(ptr))) { \
         case 4: \
                 __x = (typeof(__x))(unsigned long) \
-                        __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+                        __insn_cmpexch4((ptr), \
+                                (u32)(unsigned long)(n)); \
                 break; \
         case 8: \
-                __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+                __x = (typeof(__x))__insn_cmpexch((ptr), \
+                                                  (long long)(n)); \
                 break; \
         default: \
                 __cmpxchg_called_with_bad_pointer(); \
...
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
         return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
         return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
         /*
          * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
         return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
...