Commit 0e862838 authored by Alexander Lobakin, committed by Yury Norov

bitops: unify non-atomic bitops prototypes across architectures

Currently, there is a mess with the prototypes of the non-atomic
bitops across the different architectures:

ret	bool, int, unsigned long
nr	int, long, unsigned int, unsigned long
addr	volatile unsigned long *, volatile void *

Thankfully, this doesn't provoke any bugs, but it can make the compiler
emit warnings at the least convenient moments.
Adjust all the prototypes to the following standard:

ret	bool				retval can be only 0 or 1
nr	unsigned long			native; signed makes no sense
addr	volatile unsigned long *	bitmaps are arrays of ulongs
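
For illustration, every non-atomic bitop now carries one of two
signature shapes, lifted from the generic implementations in the diff
below (void ops vs. bool-returning test ops):

	static __always_inline void
	generic___set_bit(unsigned long nr, volatile unsigned long *addr);

	static __always_inline bool
	generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr);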

Next, some architectures don't define 'arch_' versions as they don't
support instrumentation, others do. To make sure there is always the
same set of callables present and to ease any potential future
changes, make them all follow the rule:
 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;
and place the non-prefixed definitions into a new file in
asm-generic to be included by non-instrumented architectures.
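
Concretely, the new header (added as
include/asm-generic/bitops/non-instrumented-non-atomic.h, full text in
the diff below) is nothing but aliases, e.g.:

	#define __set_bit	arch___set_bit
	#define test_bit	arch_test_bit

so non-instrumented architectures get the non-prefixed names for free,
while instrumented ones keep defining them as KASAN/KCSAN-aware
wrappers in instrumented-non-atomic.h.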

Finally, add some static assertions in order to prevent people from
making a mess in this room again.
I also used the %__always_inline attribute consistently, so that
they always get resolved to the actual operations.
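
Both rules are enforced at build time by the new __check_bitop_pr()
macro in include/linux/bitops.h (see the diff below). Because the ops
are now proper __always_inline functions rather than macros,
__same_type() can compare each arch_ and non-prefixed variant against
its generic_ counterpart:

	#define __check_bitop_pr(name)					\
		static_assert(__same_type(arch_##name, generic_##name) &&	\
			      __same_type(name, generic_##name))

	__check_bitop_pr(__set_bit);

An architecture that deviates from the standard prototypes now fails
to build instead of silently diverging.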
Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
parent 21bb8af5
arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
 	smp_mb();
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
 
 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
 
-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
 	return __ffs(tmp) + ofs;
 }
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
arch/hexagon/include/asm/bitops.h
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_clear_bit(nr, addr);
 }
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_set_bit(nr, addr);
 }
 
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_change_bit(nr, addr);
 }
 
 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }
 
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	int retval;
 
@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
 	return retval;
 }
 
-#define test_bit(nr, addr) __test_bit(nr, addr)
-
 /*
  * ffz - find first zero in word.
  * @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }
 
 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
arch/ia64/include/asm/bitops.h
@@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
 }
 
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
 }
 
 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
@@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
 }
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
@@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }
 
 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 old, bit = (1 << (nr & 31));
 	__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
 	return (old & bit) != 0;
 }
 
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
 
 #ifdef __KERNEL__
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
arch/m68k/include/asm/bitops.h
@@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
 				bfset_mem_set_bit(nr, vaddr))
 #endif
 
-#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
-
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	set_bit(nr, addr);
+}
 
 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
 				bfclr_mem_clear_bit(nr, vaddr))
 #endif
 
-#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)
-
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	clear_bit(nr, addr);
+}
 
 static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -145,14 +151,17 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 				bfchg_mem_change_bit(nr, vaddr))
 #endif
 
-#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
-
-
-static inline int test_bit(int nr, const volatile unsigned long *vaddr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+	change_bit(nr, addr);
 }
 
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
 
 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
@@ -201,8 +210,11 @@ static inline int bfset_mem_test_and_set_bit(int nr,
 					bfset_mem_test_and_set_bit(nr, vaddr))
 #endif
 
-#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_set_bit(nr, addr);
+}
 
 static inline int bclr_reg_test_and_clear_bit(int nr,
 					      volatile unsigned long *vaddr)
@@ -251,8 +263,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
 					bfclr_mem_test_and_clear_bit(nr, vaddr))
 #endif
 
-#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_clear_bit(nr, addr);
+}
 
 static inline int bchg_reg_test_and_change_bit(int nr,
 					       volatile unsigned long *vaddr)
@@ -301,8 +316,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
 					bfchg_mem_test_and_change_bit(nr, vaddr))
 #endif
 
-#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_change_bit(nr, addr);
+}
 
 /*
  *	The true 68020 and more advanced processors support the "bfffo"
@@ -522,6 +540,7 @@ static inline int __fls(int x)
 #define clear_bit_unlock	clear_bit
 #define __clear_bit_unlock	clear_bit_unlock
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
arch/s390/include/asm/bitops.h
@@ -113,75 +113,76 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
 	return old & mask;
 }
 
-static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 
-	*addr |= mask;
+	*p |= mask;
 }
 
-static inline void arch___clear_bit(unsigned long nr,
-				    volatile unsigned long *ptr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 
-	*addr &= ~mask;
+	*p &= ~mask;
 }
 
-static inline void arch___change_bit(unsigned long nr,
-				     volatile unsigned long *ptr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 
-	*addr ^= mask;
+	*p ^= mask;
 }
 
-static inline bool arch___test_and_set_bit(unsigned long nr,
-					   volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;
 
-	old = *addr;
-	*addr |= mask;
+	old = *p;
+	*p |= mask;
 	return old & mask;
 }
 
-static inline bool arch___test_and_clear_bit(unsigned long nr,
-					     volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;
 
-	old = *addr;
-	*addr &= ~mask;
+	old = *p;
+	*p &= ~mask;
 	return old & mask;
 }
 
-static inline bool arch___test_and_change_bit(unsigned long nr,
-					      volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;
 
-	old = *addr;
-	*addr ^= mask;
+	old = *p;
+	*p ^= mask;
 	return old & mask;
 }
 
-static inline bool arch_test_bit(unsigned long nr,
-				 const volatile unsigned long *ptr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
-	const volatile unsigned long *addr = __bitops_word(nr, ptr);
+	const volatile unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 
-	return *addr & mask;
+	return *p & mask;
 }
 
 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
arch/sh/include/asm/bitops-op32.h
@@ -2,6 +2,8 @@
 #ifndef __ASM_SH_BITOPS_OP32_H
 #define __ASM_SH_BITOPS_OP32_H
 
+#include <linux/bits.h>
+
 /*
  * The bit modifying instructions on SH-2A are only capable of working
  * with a 3-bit immediate, which signifies the shift position for the bit
@@ -16,7 +18,8 @@
 #define BYTE_OFFSET(nr)	((nr) % BITS_PER_BYTE)
 #endif
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 	}
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					    volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr,
 }
 
 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
arch/x86/include/asm/bitops.h
@@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
 }
 
 static __always_inline void
-arch___set_bit(long nr, volatile unsigned long *addr)
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 }
 
 static __always_inline void
-arch___clear_bit(long nr, volatile unsigned long *addr)
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 }
 
 static __always_inline void
-arch___change_bit(long nr, volatile unsigned long *addr)
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 }
 
 static __always_inline bool
-arch___test_and_set_bit(long nr, volatile unsigned long *addr)
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
  * this without also updating arch/x86/kernel/kvm.c
  */
 static __always_inline bool
-arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 }
 
 static __always_inline bool
-arch___test_and_change_bit(long nr, volatile unsigned long *addr)
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -219,10 +219,12 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 	return oldbit;
 }
 
-#define arch_test_bit(nr, addr)			\
-	(__builtin_constant_p((nr))		\
-	 ? constant_test_bit((nr), (addr))	\
-	 : variable_test_bit((nr), (addr)))
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
+					  variable_test_bit(nr, addr);
+}
 
 /**
  * __ffs - find first set bit in word
include/asm-generic/bitops/generic-non-atomic.h
@@ -24,7 +24,7 @@
  * may be that only one operation succeeds.
  */
 static __always_inline void
-generic___set_bit(unsigned int nr, volatile unsigned long *addr)
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -33,7 +33,7 @@ generic___set_bit(unsigned int nr, volatile unsigned long *addr)
 }
 
 static __always_inline void
-generic___clear_bit(unsigned int nr, volatile unsigned long *addr)
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -50,8 +50,8 @@ generic___clear_bit(unsigned int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __always_inline
-void generic___change_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -68,8 +68,8 @@ void generic___change_bit(unsigned int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __always_inline int
-generic___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -88,8 +88,8 @@ generic___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __always_inline int
-generic___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -100,8 +100,8 @@ generic___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static __always_inline int
-generic___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -116,8 +116,8 @@ generic___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static __always_inline int
-generic_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	/*
 	 * Unlike the bitops with the '__' prefix above, this one *is* atomic,
include/asm-generic/bitops/instrumented-non-atomic.h
@@ -22,7 +22,8 @@
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+__set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___set_bit(nr, addr);
@@ -37,7 +38,8 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+__clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit(nr, addr);
@@ -52,7 +54,8 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+__change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___change_bit(nr, addr);
@@ -90,7 +93,8 @@ static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_set_bit(nr, addr);
@@ -104,7 +108,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_clear_bit(nr, addr);
@@ -118,7 +123,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__instrument_read_write_bitop(nr, addr);
 	return arch___test_and_change_bit(nr, addr);
@@ -129,7 +135,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
-static __always_inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_bit(nr, addr);
include/asm-generic/bitops/non-atomic.h
@@ -5,24 +5,15 @@
 #include <asm-generic/bitops/generic-non-atomic.h>
 
 #define arch___set_bit generic___set_bit
-#define __set_bit arch___set_bit
-
 #define arch___clear_bit generic___clear_bit
-#define __clear_bit arch___clear_bit
-
 #define arch___change_bit generic___change_bit
-#define __change_bit arch___change_bit
 
 #define arch___test_and_set_bit generic___test_and_set_bit
-#define __test_and_set_bit arch___test_and_set_bit
-
 #define arch___test_and_clear_bit generic___test_and_clear_bit
-#define __test_and_clear_bit arch___test_and_clear_bit
-
 #define arch___test_and_change_bit generic___test_and_change_bit
-#define __test_and_change_bit arch___test_and_change_bit
 
 #define arch_test_bit generic_test_bit
-#define test_bit arch_test_bit
+
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
include/asm-generic/bitops/non-instrumented-non-atomic.h (new file)
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define __set_bit arch___set_bit
+#define __clear_bit arch___clear_bit
+#define __change_bit arch___change_bit
+
+#define __test_and_set_bit arch___test_and_set_bit
+#define __test_and_clear_bit arch___test_and_clear_bit
+#define __test_and_change_bit arch___test_and_change_bit
+
+#define test_bit arch_test_bit
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
include/linux/bitops.h
@@ -26,12 +26,29 @@ extern unsigned int __sw_hweight16(unsigned int w);
 extern unsigned int __sw_hweight32(unsigned int w);
 extern unsigned long __sw_hweight64(__u64 w);
 
+#include <asm-generic/bitops/generic-non-atomic.h>
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
  */
 #include <asm/bitops.h>
 
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name)						\
+	static_assert(__same_type(arch_##name, generic_##name) &&	\
+		      __same_type(name, generic_##name))
+
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
+
+#undef __check_bitop_pr
+
 static inline int get_bitmask_order(unsigned int count)
 {
 	int order;
tools/include/asm-generic/bitops/non-atomic.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
-#include <asm/types.h>
+#include <linux/bits.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -13,7 +13,8 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+__set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 	*p |= mask;
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+__clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+__change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					    volatile unsigned long *addr)
+static __always_inline bool
+__test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -101,7 +106,8 @@ static inline int __test_and_change_bit(int nr,
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }