Commit 2e57d1d6 authored by Linus Torvalds

Merge tag 'cmpxchg.2024.05.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull cmpxchg updates from Paul McKenney:
 "Provide one-byte and two-byte cmpxchg() support on sparc32, parisc,
  and csky

  This provides native one-byte and two-byte cmpxchg() support for
  sparc32 and parisc, courtesy of Al Viro. This support is provided by
  the same hashed-array-of-locks technique used for the other atomic
  operations provided for these two platforms.

  There is also emulated one-byte cmpxchg() support for csky using a new
  cmpxchg_emu_u8() function that uses a four-byte cmpxchg() to emulate
  the one-byte variant.

  Similar patches for emulation of one-byte cmpxchg() for arc, sh, and
  xtensa have not yet received maintainer acks, so they are slated for
  the v6.11 merge window"
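
For readers unfamiliar with the hashed-array-of-locks technique mentioned above, here is a minimal userspace sketch of the idea: each atomic target address hashes to one lock in a small array, and the compare-and-exchange is performed under that lock. This is an illustration only; NR_LOCKS, lock_for(), emu_cmpxchg_u8() and the hash shift are made-up names and parameters, and the real implementations are the ATOMIC_HASH / _atomic_spin_lock_irqsave code in the sparc32 and parisc hunks below.

/*
 * Illustrative userspace sketch of a hashed-array-of-locks cmpxchg,
 * loosely modeled on the kernel's ATOMIC_HASH approach.  NR_LOCKS,
 * lock_for() and emu_cmpxchg_u8() are hypothetical names.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS 64	/* must be a power of two */

/* One lock per hash bucket; addresses that collide simply share a lock. */
static pthread_mutex_t locks[NR_LOCKS] = {
	[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER	/* GNU range initializer */
};

/* Hash the target address (dropping low bits) into the lock array. */
static pthread_mutex_t *lock_for(volatile void *addr)
{
	return &locks[((uintptr_t)addr >> 4) & (NR_LOCKS - 1)];
}

/* Compare-and-exchange one byte while holding the bucket lock. */
static uint8_t emu_cmpxchg_u8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{
	pthread_mutex_t *lock = lock_for(ptr);
	uint8_t prev;

	pthread_mutex_lock(lock);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	pthread_mutex_unlock(lock);
	return prev;	/* success iff prev == old */
}

int main(void)
{
	volatile uint8_t v = 5;

	/* First exchange succeeds (5 -> 9); second fails because v is no longer 5. */
	printf("prev=%u v=%u\n", (unsigned)emu_cmpxchg_u8(&v, 5, 9), (unsigned)v);
	printf("prev=%u v=%u\n", (unsigned)emu_cmpxchg_u8(&v, 5, 7), (unsigned)v);
	return 0;
}

What makes this scheme correct in the kernel is that all of the other atomic and bitop routines on these platforms take the same hashed lock, so a locked cmpxchg cannot race with a concurrent non-cmpxchg atomic on the same word.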

* tag 'cmpxchg.2024.05.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  csky: Emulate one-byte cmpxchg
  lib: Add one-byte emulation function
  parisc: add u16 support to cmpxchg()
  parisc: add missing export of __cmpxchg_u8()
  parisc: unify implementations of __cmpxchg_u{8,32,64}
  parisc: __cmpxchg_u32(): lift conversion into the callers
  sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes
  sparc32: unify __cmpxchg_u{32,64}
  sparc32: make the first argument of __cmpxchg_u64() volatile u64 *
  sparc32: make __cmpxchg_u32() return u32
parents c0b9620b 5800e77d
@@ -1617,4 +1617,7 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT
 	# strict alignment always, even with -falign-functions.
 	def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG
 
+config ARCH_NEED_CMPXCHG_1_EMU
+	bool
+
 endmenu
@@ -37,6 +37,7 @@ config CSKY
 	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
 	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
 	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_NEED_CMPXCHG_1_EMU
 	select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select COMMON_CLK
...
@@ -6,6 +6,7 @@
 #ifdef CONFIG_SMP
 #include <linux/bug.h>
 #include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>
 
 #define __xchg_relaxed(new, ptr, size)				\
 ({								\
@@ -61,6 +62,9 @@
 	__typeof__(old) __old = (old);				\
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
+	case 1:							\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;						\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
@@ -91,6 +95,9 @@
 	__typeof__(old) __old = (old);				\
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
+	case 1:							\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;						\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
@@ -122,6 +129,9 @@
 	__typeof__(old) __old = (old);				\
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
+	case 1:							\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;						\
 	case 4:							\
 		asm volatile (					\
 		RELEASE_FENCE					\
...
@@ -56,26 +56,24 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
-				   unsigned int new_);
-extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */
 extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_);
+extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
-	switch (size) {
+	return
 #ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+		size == 8 ? __cmpxchg_u64(ptr, old, new_) :
 #endif
-	case 4: return __cmpxchg_u32((unsigned int *)ptr,
-				     (unsigned int)old, (unsigned int)new_);
-	case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
+		size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+		size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+		size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+			(__cmpxchg_called_with_bad_pointer(), old);
 }
 
 #define arch_cmpxchg(ptr, o, n)						\
...
@@ -22,6 +22,8 @@ EXPORT_SYMBOL(memset);
 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
 EXPORT_SYMBOL(__cmpxchg_u32);
 EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP
...
@@ -56,38 +56,20 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
 }
 
-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
-	unsigned long flags;
-	u64 prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
-
-unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
-{
-	unsigned long flags;
-	unsigned int prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return (unsigned long)prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
-	unsigned long flags;
-	u8 prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
+#define CMPXCHG(T)						\
+	T notrace __cmpxchg_##T(volatile T *ptr, T old, T new)	\
+	{							\
+		unsigned long flags;				\
+		T prev;						\
+								\
+		_atomic_spin_lock_irqsave(ptr, flags);		\
+		if ((prev = *ptr) == old)			\
+			*ptr = new;				\
+		_atomic_spin_unlock_irqrestore(ptr, flags);	\
+		return prev;					\
+	}
+
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u16)
+CMPXCHG(u8)
@@ -38,21 +38,19 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ v
 /* bug catcher for when unsupported size is used - won't link */
 void __cmpxchg_called_with_bad_pointer(void);
 
-/* we only need to support cmpxchg of a u32 on sparc */
-unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+u8 __cmpxchg_u8(volatile u8 *m, u8 old, u8 new_);
+u16 __cmpxchg_u16(volatile u16 *m, u16 old, u16 new_);
+u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
-	default:
-		__cmpxchg_called_with_bad_pointer();
-		break;
-	}
-	return old;
+	return
+		size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+		size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+		size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+			(__cmpxchg_called_with_bad_pointer(), old);
 }
 
 #define arch_cmpxchg(ptr, o, n)						\
@@ -63,7 +61,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 			(unsigned long)_n_, sizeof(*(ptr)));		\
 })
 
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new);
 #define arch_cmpxchg64(ptr, old, new)	__cmpxchg_u64(ptr, old, new)
 
 #include <asm-generic/cmpxchg-local.h>
...
@@ -159,32 +159,27 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
 }
 EXPORT_SYMBOL(sp32___change_bit);
 
-unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
-	unsigned long flags;
-	u32 prev;
-
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
-	return (unsigned long)prev;
-}
+#define CMPXCHG(T)						\
+	T __cmpxchg_##T(volatile T *ptr, T old, T new)		\
+	{							\
+		unsigned long flags;				\
+		T prev;						\
+								\
+		spin_lock_irqsave(ATOMIC_HASH(ptr), flags);	\
+		if ((prev = *ptr) == old)			\
+			*ptr = new;				\
+		spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);\
+		return prev;					\
+	}
+
+CMPXCHG(u8)
+CMPXCHG(u16)
+CMPXCHG(u32)
+CMPXCHG(u64)
+
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
 EXPORT_SYMBOL(__cmpxchg_u32);
-
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
-{
-	unsigned long flags;
-	u64 prev;
-
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-	return prev;
-}
 EXPORT_SYMBOL(__cmpxchg_u64);
 
 unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
...
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Emulated 1-byte and 2-byte cmpxchg operations for architectures
* lacking direct support for these sizes. These are implemented in terms
* of 4-byte cmpxchg operations.
*
* Copyright (C) 2024 Paul E. McKenney.
*/
#ifndef __LINUX_CMPXCHG_EMU_H
#define __LINUX_CMPXCHG_EMU_H
uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new);
#endif /* __LINUX_CMPXCHG_EMU_H */
@@ -236,6 +236,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
 #ensure exported functions have prototypes
...
// SPDX-License-Identifier: GPL-2.0+
/*
* Emulated 1-byte cmpxchg operation for architectures lacking direct
* support for this size. This is implemented in terms of 4-byte cmpxchg
* operations.
*
* Copyright (C) 2024 Paul E. McKenney.
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/instrumented.h>
#include <linux/atomic.h>
#include <linux/panic.h>
#include <linux/bug.h>
#include <asm-generic/rwonce.h>
#include <linux/cmpxchg-emu.h>
union u8_32 {
u8 b[4];
u32 w;
};
/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */
uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new)
{
u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3);
int i = ((uintptr_t)p) & 0x3;
union u8_32 old32;
union u8_32 new32;
u32 ret;
ret = READ_ONCE(*p32);
do {
old32.w = ret;
if (old32.b[i] != old)
return old32.b[i];
new32.w = old32.w;
new32.b[i] = new;
instrument_atomic_read_write(p, 1);
ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.
} while (ret != old32.w);
return old;
}
EXPORT_SYMBOL_GPL(cmpxchg_emu_u8);
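
For experimentation outside the kernel, here is a self-contained userspace analogue of the cmpxchg_emu_u8() algorithm above, with C11 <stdatomic.h> standing in for the kernel's cmpxchg(). It is a sketch only: the emu_cmpxchg_u8() name and the test in main() are invented, and casting a plain buffer to an _Atomic pointer is a simplification that common compilers accept but ISO C does not guarantee.

/*
 * Userspace sketch of the byte-in-word emulation above; C11 atomics
 * replace the kernel's cmpxchg().  Assumption: _Atomic uint32_t has
 * the same size and layout as uint32_t (true for GCC/Clang on Linux).
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union u8_32 {
	uint8_t b[4];
	uint32_t w;
};

static uint8_t emu_cmpxchg_u8(uint8_t *p, uint8_t old, uint8_t new)
{
	/* Aligned 32-bit word containing *p, and the byte's offset inside it. */
	_Atomic uint32_t *p32 = (_Atomic uint32_t *)((uintptr_t)p & ~(uintptr_t)0x3);
	unsigned int i = (uintptr_t)p & 0x3;
	union u8_32 old32, new32;

	old32.w = atomic_load(p32);
	for (;;) {
		if (old32.b[i] != old)
			return old32.b[i];	/* mismatch: report the current byte */
		new32.w = old32.w;
		new32.b[i] = new;
		/* 4-byte CAS; on failure old32.w is refreshed and the loop retries. */
		if (atomic_compare_exchange_weak(p32, &old32.w, new32.w))
			return old;		/* success */
	}
}

int main(void)
{
	static _Alignas(4) uint8_t buf[4] = { 1, 2, 3, 4 };

	/* Succeeds: buf[2] was 3 and becomes 9. */
	printf("prev=%u buf[2]=%u\n",
	       (unsigned)emu_cmpxchg_u8(&buf[2], 3, 9), (unsigned)buf[2]);
	return 0;
}

As in the kernel version, a failed 4-byte CAS only means some byte in the containing word changed, so the loop retries with the freshly observed word instead of failing the one-byte operation.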