Commit 247dbcdb authored by Matthew Wilcox (Oracle), committed by Andrew Morton

bitops: add xor_unlock_is_negative_byte()

Replace clear_bit_and_unlock_is_negative_byte() with
xor_unlock_is_negative_byte().  We have a few places that like to lock a
folio, set a flag and unlock it again.  Allow for the possibility of
combining the latter two operations for efficiency.  We are guaranteed
that the caller holds the lock, so it is safe to unlock it with the xor. 
The caller must guarantee that nobody else will set the flag without
holding the lock; it is not safe to do this with the PG_dirty flag, for
example.

Link: https://lkml.kernel.org/r/20231004165317.1061855-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7a4847e5
...@@ -234,32 +234,25 @@ static inline int arch_test_and_change_bit(unsigned long nr, ...@@ -234,32 +234,25 @@ static inline int arch_test_and_change_bit(unsigned long nr,
} }
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
static inline unsigned long static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
clear_bit_unlock_return_word(int nr, volatile unsigned long *addr) volatile unsigned long *p)
{ {
unsigned long old, t; unsigned long old, t;
unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
unsigned long mask = BIT_MASK(nr);
__asm__ __volatile__ ( __asm__ __volatile__ (
PPC_RELEASE_BARRIER PPC_RELEASE_BARRIER
"1:" PPC_LLARX "%0,0,%3,0\n" "1:" PPC_LLARX "%0,0,%3,0\n"
"andc %1,%0,%2\n" "xor %1,%0,%2\n"
PPC_STLCX "%1,0,%3\n" PPC_STLCX "%1,0,%3\n"
"bne- 1b\n" "bne- 1b\n"
: "=&r" (old), "=&r" (t) : "=&r" (old), "=&r" (t)
: "r" (mask), "r" (p) : "r" (mask), "r" (p)
: "cc", "memory"); : "cc", "memory");
return old; return (old & BIT_MASK(7)) != 0;
} }
/* #define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
* This is a special function for mm/filemap.c
* Bit 7 corresponds to PG_waiters.
*/
#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \
(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
......
...@@ -94,18 +94,17 @@ arch___clear_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -94,18 +94,17 @@ arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
} }
static __always_inline bool static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) volatile unsigned long *addr)
{ {
bool negative; bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1" asm volatile(LOCK_PREFIX "xorb %2,%1"
CC_SET(s) CC_SET(s)
: CC_OUT(s) (negative), WBYTE_ADDR(addr) : CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "ir" ((char) ~(1 << nr)) : "memory"); : "iq" ((char)mask) : "memory");
return negative; return negative;
} }
#define arch_clear_bit_unlock_is_negative_byte \ #define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
arch_clear_bit_unlock_is_negative_byte
static __always_inline void static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr) arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
......
...@@ -58,27 +58,30 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) ...@@ -58,27 +58,30 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
return arch_test_and_set_bit_lock(nr, addr); return arch_test_and_set_bit_lock(nr, addr);
} }
#if defined(arch_clear_bit_unlock_is_negative_byte) #if defined(arch_xor_unlock_is_negative_byte)
/** /**
* clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
* byte is negative, for unlock. * it is negative, for unlock.
* @nr: the bit to clear * @mask: Change the bits which are set in this mask.
* @addr: the address to start counting from * @addr: The address of the word containing the byte to change.
* *
* Changes some of bits 0-6 in the word pointed to by @addr.
* This operation is atomic and provides release barrier semantics. * This operation is atomic and provides release barrier semantics.
* Used to optimise some folio operations which are commonly paired
* with an unlock or end of writeback. Bit 7 is used as PG_waiters to
* indicate whether anybody is waiting for the unlock.
* *
* This is a bit of a one-trick-pony for the filemap code, which clears * Return: Whether the top bit of the byte is set.
* PG_locked and tests PG_waiters,
*/ */
static inline bool static inline bool xor_unlock_is_negative_byte(unsigned long mask,
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) volatile unsigned long *addr)
{ {
kcsan_release(); kcsan_release();
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); instrument_atomic_write(addr, sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr); return arch_xor_unlock_is_negative_byte(mask, addr);
} }
/* Let everybody know we have it. */ /* Let everybody know we have it. */
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte #define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
#endif #endif
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
...@@ -66,27 +66,16 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) ...@@ -66,27 +66,16 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
raw_atomic_long_set_release((atomic_long_t *)p, old); raw_atomic_long_set_release((atomic_long_t *)p, old);
} }
/** #ifndef arch_xor_unlock_is_negative_byte
* arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
* byte is negative, for unlock.
* @nr: the bit to clear
* @addr: the address to start counting from
*
* This is a bit of a one-trick-pony for the filemap code, which clears
* PG_locked and tests PG_waiters,
*/
#ifndef arch_clear_bit_unlock_is_negative_byte
static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
volatile unsigned long *p) volatile unsigned long *p)
{ {
long old; long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr); old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
return !!(old & BIT(7)); return !!(old & BIT(7));
} }
#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte #define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
#endif #endif
#include <asm-generic/bitops/instrumented-lock.h> #include <asm-generic/bitops/instrumented-lock.h>
......
...@@ -700,10 +700,10 @@ static void test_barrier_nothreads(struct kunit *test) ...@@ -700,10 +700,10 @@ static void test_barrier_nothreads(struct kunit *test)
KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
#ifdef clear_bit_unlock_is_negative_byte #ifdef xor_unlock_is_negative_byte
KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
#endif #endif
kcsan_nestable_atomic_end(); kcsan_nestable_atomic_end();
} }
......
...@@ -228,10 +228,10 @@ static bool __init test_barrier(void) ...@@ -228,10 +228,10 @@ static bool __init test_barrier(void)
spin_lock(&test_spinlock); spin_lock(&test_spinlock);
KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
#ifdef clear_bit_unlock_is_negative_byte #ifdef xor_unlock_is_negative_byte
KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
#endif #endif
kcsan_nestable_atomic_end(); kcsan_nestable_atomic_end();
......
...@@ -1482,6 +1482,11 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) ...@@ -1482,6 +1482,11 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
} }
EXPORT_SYMBOL_GPL(folio_add_wait_queue); EXPORT_SYMBOL_GPL(folio_add_wait_queue);
#ifdef xor_unlock_is_negative_byte
#define clear_bit_unlock_is_negative_byte(nr, p) \
xor_unlock_is_negative_byte(1 << nr, p)
#endif
#ifndef clear_bit_unlock_is_negative_byte #ifndef clear_bit_unlock_is_negative_byte
/* /*
......
...@@ -1099,9 +1099,10 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr) ...@@ -1099,9 +1099,10 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr)); KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
#if defined(clear_bit_unlock_is_negative_byte) #if defined(xor_unlock_is_negative_byte)
if (nr < 7)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
clear_bit_unlock_is_negative_byte(nr, addr)); xor_unlock_is_negative_byte(1 << nr, addr));
#endif #endif
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment