Commit b115d85a authored by Linus Torvalds

Merge tag 'locking-core-2023-05-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Introduce local{,64}_try_cmpxchg() - a slightly more optimal
   primitive than local{,64}_cmpxchg(), to be used in the perf events
   ring-buffer code (see the sketch after this list)

 - Simplify/modify rwsems on PREEMPT_RT, to address writer starvation

 - Misc cleanups/fixes

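A note on why the try_cmpxchg() form is "slightly more optimal": a plain
cmpxchg() loop must compare the returned value against its own copy of the
old value and reload that copy by hand after every failed attempt, while
try_cmpxchg() returns success as a boolean and updates the caller's copy of
the old value itself (on x86 the compiler can then branch directly on the
flags set by the cmpxchg instruction). A minimal sketch of the two loop
shapes (illustrative only, not code from this merge; assumes a local_t
counter "l"):

	/* cmpxchg() loop: reload "old" by hand after every failure */
	long old = local_read(&l);
	for (;;) {
		long tmp = local_cmpxchg(&l, old, old + 1);
		if (tmp == old)
			break;
		old = tmp;
	}

	/* try_cmpxchg() loop: the primitive refreshes "old" on failure */
	long old = local_read(&l);
	do {
		/* new value is computed from the current "old" */
	} while (!local_try_cmpxchg(&l, &old, old + 1));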
* tag 'locking-core-2023-05-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomic: Correct (cmp)xchg() instrumentation
  locking/x86: Define arch_try_cmpxchg_local()
  locking/arch: Wire up local_try_cmpxchg()
  locking/generic: Wire up local{,64}_try_cmpxchg()
  locking/atomic: Add generic try_cmpxchg{,64}_local() support
  locking/rwbase: Mitigate indefinite writer starvation
  locking/arch: Rename all internal __xchg() names to __arch_xchg()
parents d5ed10bb ec570320
@@ -6,14 +6,14 @@
  * Atomic exchange routines.
  */
 
-#define ____xchg(type, args...)	__xchg ## type ## _local(args)
+#define ____xchg(type, args...)	__arch_xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
 
 #define xchg_local(ptr, x) \
 ({ \
 	__typeof__(*(ptr)) _x_ = (x); \
-	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
+	(__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\
 		sizeof(*(ptr))); \
 })
@@ -34,7 +34,7 @@
 #undef ____xchg
 #undef ____cmpxchg
-#define ____xchg(type, args...)	__xchg ##type(args)
+#define ____xchg(type, args...)	__arch_xchg ##type(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
 #include <asm/xchg.h>
@@ -48,7 +48,7 @@
 	__typeof__(*(ptr)) _x_ = (x); \
 	smp_mb(); \
 	__ret = (__typeof__(*(ptr))) \
-		__xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+		__arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
 	smp_mb(); \
 	__ret; \
 })
...
@@ -52,8 +52,16 @@ static __inline__ long local_sub_return(long i, local_t * l)
 	return result;
 }
 
-#define local_cmpxchg(l, o, n) \
-	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+static __inline__ long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	return try_cmpxchg_local(&l->a.counter, (s64 *)old, new);
+}
+
 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
 
 /**
...
@@ -85,7 +85,7 @@
  */
 #ifdef CONFIG_ARC_HAS_LLSC
 
-#define __xchg(ptr, val) \
+#define __arch_xchg(ptr, val) \
 ({ \
 	__asm__ __volatile__( \
 	"	ex  %0, [%1]	\n" /* set new value */ \
@@ -102,7 +102,7 @@
 	\
 	switch(sizeof(*(_p_))) { \
 	case 4: \
-		_val_ = __xchg(_p_, _val_); \
+		_val_ = __arch_xchg(_p_, _val_); \
 		break; \
 	default: \
 		BUILD_BUG(); \
...
@@ -25,7 +25,8 @@
 #define swp_is_buggy
 #endif
 
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	extern void __bad_xchg(volatile void *, int);
 	unsigned long ret;
@@ -115,7 +116,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 }
 
 #define arch_xchg_relaxed(ptr, x) ({ \
-	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr), \
 		sizeof(*(ptr))); \
 })
...
@@ -62,9 +62,8 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
 #undef __XCHG_CASE
 
 #define __XCHG_GEN(sfx) \
-static __always_inline unsigned long __xchg##sfx(unsigned long x, \
-					volatile void *ptr, \
-					int size) \
+static __always_inline unsigned long \
+__arch_xchg##sfx(unsigned long x, volatile void *ptr, int size) \
 { \
 	switch (size) { \
 	case 1: \
@@ -93,7 +92,7 @@ __XCHG_GEN(_mb)
 ({ \
 	__typeof__(*(ptr)) __ret; \
 	__ret = (__typeof__(*(ptr))) \
-		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+		__arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
 	__ret; \
 })
...
@@ -9,7 +9,7 @@
 #define _ASM_CMPXCHG_H
 
 /*
- * __xchg - atomically exchange a register and a memory location
+ * __arch_xchg - atomically exchange a register and a memory location
  * @x: value to swap
  * @ptr: pointer to memory
  * @size: size of the value
@@ -19,8 +19,8 @@
  * Note: there was an errata for V2 about .new's and memw_locked.
  *
  */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	unsigned long retval;
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  * Atomically swap the contents of a register with memory. Should be atomic
  * between multiple CPU's and within interrupts on the same CPU.
  */
-#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
 	sizeof(*(ptr))))
 
 /*
...
@@ -5,7 +5,7 @@
 #include <uapi/asm/cmpxchg.h>
 
 #define arch_xchg(ptr, x) \
-({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 
 #define arch_cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
 #define arch_cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
...
@@ -23,7 +23,7 @@
  */
 extern void ia64_xchg_called_with_bad_pointer(void);
 
-#define __xchg(x, ptr, size) \
+#define __arch_xchg(x, ptr, size) \
 ({ \
 	unsigned long __xchg_result; \
 	\
@@ -51,7 +51,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 #ifndef __KERNEL__
 #define xchg(ptr, x) \
-({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 #endif
 
 /*
...
@@ -62,7 +62,7 @@ static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
 }
 
 static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
+__arch_xchg(volatile void *ptr, unsigned long x, int size)
 {
 	switch (size) {
 	case 1:
@@ -87,7 +87,7 @@ __xchg(volatile void *ptr, unsigned long x, int size)
 	__typeof__(*(ptr)) __res; \
 	\
 	__res = (__typeof__(*(ptr))) \
-		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
 	\
 	__res; \
 })
...
@@ -56,8 +56,17 @@ static inline long local_sub_return(long i, local_t *l)
 	return result;
 }
 
-#define local_cmpxchg(l, o, n) \
-	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+static inline long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+
+	return try_cmpxchg_local(&l->a.counter, __old, new);
+}
+
 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
 
 /**
...
@@ -9,7 +9,7 @@
 extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);
 
 #ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	unsigned long flags, tmp;
@@ -40,7 +40,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 	return x;
 }
 #else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
 	case 1:
@@ -75,7 +75,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 }
 #endif
 
-#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
 
 #include <asm-generic/cmpxchg-local.h>
...
@@ -68,7 +68,7 @@ extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
 			unsigned int size);
 
 static __always_inline
-unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
+unsigned long __arch_xchg(volatile void *ptr, unsigned long x, int size)
 {
 	switch (size) {
 	case 1:
@@ -102,7 +102,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 	smp_mb__before_llsc(); \
 	\
 	__res = (__typeof__(*(ptr))) \
-		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
 	\
 	smp_llsc_mb(); \
 	\
...
@@ -94,8 +94,17 @@ static __inline__ long local_sub_return(long i, local_t * l)
 	return result;
 }
 
-#define local_cmpxchg(l, o, n) \
-	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+static __inline__ long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+
+	return try_cmpxchg_local(&l->a.counter, __old, new);
+}
+
 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
 
 /**
...
@@ -147,8 +147,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 extern unsigned long __xchg_called_with_bad_pointer(void)
 	__compiletime_error("Bad argument size for xchg");
 
-static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
-				   int size)
+static inline unsigned long
+__arch_xchg(volatile void *ptr, unsigned long with, int size)
 {
 	switch (size) {
 	case 1:
@@ -163,7 +163,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
 
 #define arch_xchg(ptr, with) \
 ({ \
-	(__typeof__(*(ptr))) __xchg((ptr), \
+	(__typeof__(*(ptr))) __arch_xchg((ptr), \
 		(unsigned long)(with), \
 		sizeof(*(ptr))); \
 })
...
@@ -22,7 +22,7 @@ extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
 
 /* optimizer better get rid of switch since size is a constant */
 static inline unsigned long
-__xchg(unsigned long x, volatile void *ptr, int size)
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
@@ -49,7 +49,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
 	__typeof__(*(ptr)) __ret; \
 	__typeof__(*(ptr)) _x_ = (x); \
 	__ret = (__typeof__(*(ptr))) \
-		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
+		__arch_xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
 	__ret; \
 })
...
@@ -229,7 +229,7 @@ __xchg_local(void *ptr, unsigned long x, unsigned int size)
 		return __xchg_u64_local(ptr, x);
 #endif
 	}
-	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
 	return x;
 }
@@ -248,7 +248,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
 		return __xchg_u64_relaxed(ptr, x);
 #endif
 	}
-	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
 	return x;
 }
 #define arch_xchg_local(ptr,x) \
...
@@ -90,6 +90,17 @@ static __inline__ long local_cmpxchg(local_t *l, long o, long n)
 	return t;
 }
 
+static __inline__ bool local_try_cmpxchg(local_t *l, long *po, long n)
+{
+	long o = *po, r;
+
+	r = local_cmpxchg(l, o, n);
+	if (unlikely(r != o))
+		*po = r;
+
+	return likely(r == o);
+}
+
 static __inline__ long local_xchg(local_t *l, long n)
 {
 	long t;
...
@@ -261,7 +261,7 @@ c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
 static __always_inline \
 c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
 { \
-	return __xchg(&(v->counter), n, size); \
+	return __arch_xchg(&(v->counter), n, size); \
 } \
 static __always_inline \
 c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
...
@@ -114,7 +114,7 @@
 		_x_, sizeof(*(ptr))); \
 })
 
-#define __xchg(ptr, new, size) \
+#define __arch_xchg(ptr, new, size) \
 ({ \
 	__typeof__(ptr) __ptr = (ptr); \
 	__typeof__(new) __new = (new); \
@@ -143,7 +143,7 @@
 #define arch_xchg(ptr, x) \
 ({ \
 	__typeof__(*(ptr)) _x_ = (x); \
-	(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
+	(__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
 })
 
 #define xchg32(ptr, x) \
...
@@ -14,8 +14,8 @@
 
 void __xchg_called_with_bad_pointer(void);
 
-static __always_inline unsigned long __xchg(unsigned long x,
-					    unsigned long address, int size)
+static __always_inline unsigned long
+__arch_xchg(unsigned long x, unsigned long address, int size)
 {
 	unsigned long old;
 	int shift;
@@ -77,7 +77,7 @@ static __always_inline unsigned long __xchg(unsigned long x,
 	__typeof__(*(ptr)) __ret; \
 	\
 	__ret = (__typeof__(*(ptr))) \
-		__xchg((unsigned long)(x), (unsigned long)(ptr), \
+		__arch_xchg((unsigned long)(x), (unsigned long)(ptr), \
 			sizeof(*(ptr))); \
 	__ret; \
 })
...
@@ -22,7 +22,7 @@
 
 extern void __xchg_called_with_bad_pointer(void);
 
-#define __xchg(ptr, x, size) \
+#define __arch_xchg(ptr, x, size) \
 ({ \
 	unsigned long __xchg__res; \
 	volatile void *__xchg_ptr = (ptr); \
@@ -46,7 +46,7 @@ extern void __xchg_called_with_bad_pointer(void);
 })
 
 #define arch_xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
+	((__typeof__(*(ptr)))__arch_xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
 
 /* This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg(). */
...
@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 	return x;
 }
 
-#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
 
 /* Emulate cmpxchg() the same way we emulate atomics,
  * by hashing the object address and indexing into an array
...
@@ -55,7 +55,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 #define arch_xchg(ptr,x) \
 ({	__typeof__(*(ptr)) __ret; \
 	__ret = (__typeof__(*(ptr))) \
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+		__arch_xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
 	__ret; \
 })
@@ -87,8 +87,8 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
 	return (load32 & mask) >> bit_shift;
 }
 
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
-				   int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 2:
...
@@ -221,9 +221,15 @@ extern void __add_wrong_size(void)
 #define __try_cmpxchg(ptr, pold, new, size) \
 	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
 
+#define __try_cmpxchg_local(ptr, pold, new, size) \
+	__raw_try_cmpxchg((ptr), (pold), (new), (size), "")
+
 #define arch_try_cmpxchg(ptr, pold, new) \
 	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
 
+#define arch_try_cmpxchg_local(ptr, pold, new) \
+	__try_cmpxchg_local((ptr), (pold), (new), sizeof(*(ptr)))
+
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
...
@@ -120,8 +120,17 @@ static inline long local_sub_return(long i, local_t *l)
 #define local_inc_return(l) (local_add_return(1, l))
 #define local_dec_return(l) (local_sub_return(1, l))
 
-#define local_cmpxchg(l, o, n) \
-	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+static inline long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+
+	return try_cmpxchg_local(&l->a.counter, __old, new);
+}
+
 /* Always has a lock prefix */
 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
...
@@ -170,7 +170,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 }
 
 #define arch_xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+	((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
 {
@@ -203,7 +203,7 @@ static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
 extern void __xchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
+__arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
 	case 1:
...
@@ -42,6 +42,7 @@ typedef struct
 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
 
 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_try_cmpxchg(l, po, n) atomic_long_try_cmpxchg((&(l)->a), (po), (n))
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
...
@@ -42,7 +42,16 @@ typedef struct {
 #define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
 #define local64_inc_return(l) local_inc_return(&(l)->a)
 
-#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+static inline s64 local64_cmpxchg(local64_t *l, s64 old, s64 new)
+{
+	return local_cmpxchg(&l->a, old, new);
+}
+
+static inline bool local64_try_cmpxchg(local64_t *l, s64 *old, s64 new)
+{
+	return local_try_cmpxchg(&l->a, (long *)old, new);
+}
+
 #define local64_xchg(l, n) local_xchg((&(l)->a), (n))
 #define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
 #define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
@@ -81,6 +90,7 @@ typedef struct {
 #define local64_inc_return(l) atomic64_inc_return(&(l)->a)
 
 #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_try_cmpxchg(l, po, n) atomic64_try_cmpxchg((&(l)->a), (po), (n))
 #define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
 #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
 #define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
...
@@ -217,6 +217,28 @@
 
 #endif /* arch_try_cmpxchg64_relaxed */
 
+#ifndef arch_try_cmpxchg_local
+#define arch_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+	___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \
+	if (unlikely(___r != ___o)) \
+		*___op = ___r; \
+	likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_local */
+
+#ifndef arch_try_cmpxchg64_local
+#define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+	___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \
+	if (unlikely(___r != ___o)) \
+		*___op = ___r; \
+	likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_local */
+
 #ifndef arch_atomic_read_acquire
 static __always_inline int
 arch_atomic_read_acquire(const atomic_t *v)
@@ -2646,4 +2668,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
 #endif
 
 #endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 00071fffa021cec66f6290d706d69c91df87bade
+// ad2e2b4d168dbc60a73922616047a9bfa446af36
@@ -1948,14 +1948,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg(__ai_ptr, __VA_ARGS__); \
 })
 
 #define xchg_acquire(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
 })
@@ -1963,14 +1963,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg_release(__ai_ptr, __VA_ARGS__); \
 })
 
 #define xchg_relaxed(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
 })
@@ -1978,14 +1978,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg_acquire(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
 })
@@ -1993,14 +1993,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg_relaxed(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
 })
@@ -2008,14 +2008,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg64_acquire(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
 })
@@ -2023,14 +2023,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg64_relaxed(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
 })
@@ -2039,8 +2039,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2048,8 +2048,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2058,8 +2058,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2067,8 +2067,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2077,8 +2077,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2086,8 +2086,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2096,8 +2096,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
@@ -2105,22 +2105,22 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
 #define cmpxchg_local(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg64_local(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
 })
@@ -2128,15 +2128,33 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
 })
 
+#define try_cmpxchg_local(ptr, oldp, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	typeof(oldp) __ai_oldp = (oldp); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	arch_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_local(ptr, oldp, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	typeof(oldp) __ai_oldp = (oldp); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	arch_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
 #define cmpxchg_double(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
 	arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
 })
@@ -2144,9 +2162,9 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 
 #define cmpxchg_double_local(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
 	arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
 })
 
 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 1b485de9cbaa4900de59e14ee2084357eaeb1c3a
+// 6b513a42e1a1b5962532a019b7fc91eaa044ad5e
@@ -72,15 +72,6 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
 	int ret;
 
 	raw_spin_lock_irq(&rtm->wait_lock);
-	/*
-	 * Allow readers, as long as the writer has not completely
-	 * acquired the semaphore for write.
-	 */
-	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
-		atomic_inc(&rwb->readers);
-		raw_spin_unlock_irq(&rtm->wait_lock);
-		return 0;
-	}
 
 	/*
 	 * Call into the slow lock path with the rtmutex->wait_lock
...
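A note for readers of the rwbase_rt.c hunk above: the deleted block was the
reader fast path taken under rtm->wait_lock. It let every newly arriving
reader take the semaphore as long as the writer had not fully acquired it
(readers != WRITER_BIAS), so a continuous stream of readers could starve a
queued writer indefinitely; removing it sends every contended reader through
the rtmutex slow path, where it queues behind any waiting writer.
Schematically (a simplified sketch, not the literal kernel code):

	/* Removed from __rwbase_read_lock(), under rtm->wait_lock: */
	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
		atomic_inc(&rwb->readers);	/* reader slips ahead of the writer */
		raw_spin_unlock_irq(&rtm->wait_lock);
		return 0;
	}
	/* Now readers fall through to the rtmutex slow path instead, so
	 * readers and writers are ordered by the rtmutex wait queue. */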
@@ -225,6 +225,10 @@ for cmpxchg in "cmpxchg" "cmpxchg64"; do
 	gen_try_cmpxchg_fallbacks "${cmpxchg}"
 done
 
+for cmpxchg in "cmpxchg_local" "cmpxchg64_local"; do
+	gen_try_cmpxchg_fallback "${cmpxchg}" ""
+done
+
 grep '^[a-z]' "$1" | while read name meta args; do
 	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
 done
...
@@ -104,8 +104,8 @@ cat <<EOF
 EOF
 [ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
 cat <<EOF
-	instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
-	instrument_atomic_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\
+	instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
+	instrument_read_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\
 	arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
 })
 EOF
@@ -119,7 +119,7 @@ cat <<EOF
 EOF
 [ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
 cat <<EOF
-	instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
+	instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
 	arch_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
 })
 EOF
@@ -173,7 +173,7 @@ for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg" "try_cmpxchg64"; do
 	done
 done
 
-for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
+for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" ; do
 	gen_xchg "${xchg}" "" ""
 	printf "\n"
 done
...