Commit d192ef7d authored by Greg Ungerer, committed by Linus Torvalds

[PATCH] m68knommu: optimized bitops operations for m68knommu

Here is a patch to provide faster bitops for m68knommu, using the bset/bclr/bchg
and btst instructions, which test and modify a bit in a single instruction on
m68k and ColdFire processors. We thus avoid the need for
local_irq_save/local_irq_restore.

Patch originally submitted by Philippe De Muyter <phdm@macqel.be>.
Signed-off-by: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 173feff2
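
The subtle point in the new code is the memory-operand addressing. The m68k bit
instructions used here (bset/bclr/bchg/btst) operate on a single byte when given
a memory operand and take the bit number modulo 8 from the data register, so bit
nr of a big-endian 32-bit long has to be located in byte (nr ^ 31) >> 3; the
ext2 variants, which are defined to use little-endian byte order, index with
plain nr >> 3. A minimal host-runnable sketch of just that index arithmetic
(plain C, no inline assembly; the function name is illustrative, not part of the
patch):

#include <stdio.h>

/*
 * Byte offset, within a big-endian 32-bit long, of the byte holding
 * bit nr (nr == 0 is the least significant bit).  This is the same
 * "(nr ^ 31) >> 3" expression the patch feeds to bset/bclr/bchg;
 * the bit within that byte is nr & 7, which is all the CPU uses for
 * byte-sized bit instructions.
 */
static unsigned be_byte_index(unsigned nr)
{
	return (nr ^ 31) >> 3;
}

int main(void)
{
	unsigned nr;

	for (nr = 0; nr < 32; nr += 7)
		printf("bit %2u -> byte %u, bit-in-byte %u\n",
		       nr, be_byte_index(nr), nr & 7);
	return 0;
}

For nr = 0 this selects byte 3, the least significant byte of a big-endian
long, which is exactly where 1 << 0 lands.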
@@ -113,26 +113,20 @@ static __inline__ unsigned long ffz(unsigned long word)
 
 static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-	int * a = (int *) addr;
-	int mask;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0", "cc");
+#else
+	__asm__ __volatile__ ("bset %1,%0"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		: "cc");
+#endif
 }
 
-static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
-{
-	int * a = (int *) addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a |= mask;
-}
+#define __set_bit(nr, addr) set_bit(nr, addr)
 
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
@@ -142,132 +136,100 @@ static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
 static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-	int * a = (int *) addr;
-	int mask;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0", "cc");
+#else
+	__asm__ __volatile__ ("bclr %1,%0"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		: "cc");
+#endif
 }
 
-static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
-{
-	int * a = (int *) addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a &= ~mask;
-}
+#define __clear_bit(nr, addr) clear_bit(nr, addr)
 
 static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-	int mask, flags;
-	unsigned long *ADDR = (unsigned long *) addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	local_irq_save(flags);
-	*ADDR ^= mask;
-	local_irq_restore(flags);
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0", "cc");
+#else
+	__asm__ __volatile__ ("bchg %1,%0"
+		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		: "cc");
+#endif
 }
 
-static __inline__ void __change_bit(int nr, volatile unsigned long * addr)
-{
-	int mask;
-	unsigned long *ADDR = (unsigned long *) addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	*ADDR ^= mask;
-}
+#define __change_bit(nr, addr) change_bit(nr, addr)
 
 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *) addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore(flags);
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
 
 	return retval;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *) addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	return retval;
-}
+#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
 
 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *) addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore(flags);
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
 
 	return retval;
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *) addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	return retval;
-}
+#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
 
 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *) addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bchg %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
 
 	return retval;
 }
 
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *) addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	return retval;
-}
+#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
 
 /*
  * This routine doesn't need to be atomic.
@@ -294,6 +256,8 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
 
 #define find_first_zero_bit(addr, size) \
 	find_next_zero_bit((addr), (size), 0)
+#define find_first_bit(addr, size) \
+	find_next_bit((addr), (size), 0)
 
 static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
 {
@@ -385,31 +349,39 @@ static __inline__ unsigned long find_next_bit(const unsigned long *addr,
 
 static __inline__ int ext2_set_bit(int nr, volatile void * addr)
 {
-	int mask, retval;
-	unsigned long flags;
-	volatile unsigned char *ADDR = (unsigned char *) addr;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	local_irq_save(flags);
-	retval = (mask & *ADDR) != 0;
-	*ADDR |= mask;
-	local_irq_restore(flags);
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bset %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
 	return retval;
 }
 
 static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
 {
-	int mask, retval;
-	unsigned long flags;
-	volatile unsigned char *ADDR = (unsigned char *) addr;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	local_irq_save(flags);
-	retval = (mask & *ADDR) != 0;
-	*ADDR &= ~mask;
-	local_irq_restore(flags);
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("bclr %2,%1; sne %0"
+		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
+		: "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
 }
@@ -433,12 +405,21 @@ static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
 
 static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
 {
-	int mask;
-	const volatile unsigned char *ADDR = (const unsigned char *) addr;
-
-	ADDR += nr >> 3;
-	mask = 1 << (nr & 0x07);
-	return ((mask & *ADDR) != 0);
+	char retval;
+
+#ifdef CONFIG_COLDFIRE
+	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
+		: "=d" (retval)
+		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
+		: "%a0");
+#else
+	__asm__ __volatile__ ("btst %2,%1; sne %0"
+		: "=d" (retval)
+		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
+		/* No clobber */);
+#endif
+
+	return retval;
 }
 
 #define ext2_find_first_zero_bit(addr, size) \
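
For reference, the contract that the new single-instruction test_and_set_bit()
preserves can be modelled in portable C. This is essentially the code the patch
removes, minus the local_irq_save/local_irq_restore pair; the kernel version
gets the same atomicity for free because bset performs the entire
read-modify-write as one uninterruptible instruction on these uniprocessor
parts (an illustrative sketch, not kernel code):

#include <stdint.h>

/*
 * Portable model of test_and_set_bit(): return the old value of
 * bit nr in the bitmap at addr, then set it.  The removed kernel
 * code wrapped this read-modify-write in local_irq_save()/
 * local_irq_restore(); the new code needs no masking because the
 * bset instruction does the whole update in one step.
 */
static int test_and_set_bit_model(int nr, volatile uint32_t *addr)
{
	volatile uint32_t *a = addr + (nr >> 5);
	uint32_t mask = 1ul << (nr & 0x1f);
	int retval = (*a & mask) != 0;

	*a |= mask;
	return retval;
}

Called twice on the same bit, it returns 0 and then 1, which is the
test-and-set behaviour callers of these primitives rely on.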