Commit 7016cc5d authored by Uros Bizjak, committed by Ingo Molnar

locking/atomic/x86: Modernize x86_32 arch_{,try_}_cmpxchg64{,_local}()

Commit:

  b23e139d ("arch: Introduce arch_{,try_}_cmpxchg128{,_local}()")

introduced arch_{,try_}_cmpxchg128{,_local}() for x86_64 targets.

Modernize existing x86_32 arch_{,try_}_cmpxchg64{,_local}() definitions
to follow the same structure as the definitions introduced by the
above commit.

No functional changes intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lore.kernel.org/r/20240408091547.90111-3-ubizjak@gmail.com
parent 929ad065
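
[Editor's note, not part of the commit] For readers unfamiliar with the try_cmpxchg() calling convention this series standardizes on: on failure the helper writes the freshly observed value back through the old-value pointer, so callers can retry without reloading it. A minimal user-space sketch of that caller-side pattern, using C11 atomics as a stand-in for the kernel's arch_try_cmpxchg64() (the function name and the 64-bit counter are illustrative only):

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical example: bump a 64-bit counter with a compare-exchange
 * retry loop -- the same caller-side shape used with try_cmpxchg64()
 * helpers in kernel code. */
static uint64_t counter_add(_Atomic uint64_t *ctr, uint64_t delta)
{
        uint64_t old = atomic_load_explicit(ctr, memory_order_relaxed);

        /* On failure, atomic_compare_exchange_weak() refreshes 'old'
         * with the value currently in memory, analogous to the
         * *(_oldp) = o.full update in __arch_try_cmpxchg64() below. */
        while (!atomic_compare_exchange_weak(ctr, &old, old + delta))
                ;

        return old + delta;
}

The diff below implements exactly this failure-updates-old behaviour for x86_32.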
@@ -3,103 +3,124 @@
 #define _ASM_X86_CMPXCHG_32_H

 /*
- * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
+ * Note: if you use __cmpxchg64(), or their variants,
  * you need to test for the feature in boot_cpu_data.
  */

-#ifdef CONFIG_X86_CMPXCHG64
-#define arch_cmpxchg64(ptr, o, n) \
-        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
-                                         (unsigned long long)(n)))
-#define arch_cmpxchg64_local(ptr, o, n) \
-        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
-                                               (unsigned long long)(n)))
-#define arch_try_cmpxchg64(ptr, po, n) \
-        __try_cmpxchg64((ptr), (unsigned long long *)(po), \
-                        (unsigned long long)(n))
-#endif
+union __u64_halves {
+        u64 full;
+        struct {
+                u32 low, high;
+        };
+};
+
+#define __arch_cmpxchg64(_ptr, _old, _new, _lock) \
+({ \
+        union __u64_halves o = { .full = (_old), }, \
+                           n = { .full = (_new), }; \
+ \
+        asm volatile(_lock "cmpxchg8b %[ptr]" \
+                     : [ptr] "+m" (*(_ptr)), \
+                       "+a" (o.low), "+d" (o.high) \
+                     : "b" (n.low), "c" (n.high) \
+                     : "memory"); \
+ \
+        o.full; \
+})

-static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-        u64 prev;
-        asm volatile(LOCK_PREFIX "cmpxchg8b %1"
-                     : "=A" (prev),
-                       "+m" (*ptr)
-                     : "b" ((u32)new),
-                       "c" ((u32)(new >> 32)),
-                       "0" (old)
-                     : "memory");
-        return prev;
+        return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
 }

-static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 {
-        u64 prev;
-        asm volatile("cmpxchg8b %1"
-                     : "=A" (prev),
-                       "+m" (*ptr)
-                     : "b" ((u32)new),
-                       "c" ((u32)(new >> 32)),
-                       "0" (old)
-                     : "memory");
-        return prev;
+        return __arch_cmpxchg64(ptr, old, new,);
 }

-static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
+#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock) \
+({ \
+        union __u64_halves o = { .full = *(_oldp), }, \
+                           n = { .full = (_new), }; \
+        bool ret; \
+ \
+        asm volatile(_lock "cmpxchg8b %[ptr]" \
+                     CC_SET(e) \
+                     : CC_OUT(e) (ret), \
+                       [ptr] "+m" (*(_ptr)), \
+                       "+a" (o.low), "+d" (o.high) \
+                     : "b" (n.low), "c" (n.high) \
+                     : "memory"); \
+ \
+        if (unlikely(!ret)) \
+                *(_oldp) = o.full; \
+ \
+        likely(ret); \
+})
+
+static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
 {
-        bool success;
-        u64 old = *pold;
-        asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
-                     CC_SET(z)
-                     : CC_OUT(z) (success),
-                       [ptr] "+m" (*ptr),
-                       "+A" (old)
-                     : "b" ((u32)new),
-                       "c" ((u32)(new >> 32))
-                     : "memory");
-        if (unlikely(!success))
-                *pold = old;
-        return success;
+        return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
 }

-#ifndef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_CMPXCHG64
+
+#define arch_cmpxchg64 __cmpxchg64
+#define arch_cmpxchg64_local __cmpxchg64_local
+#define arch_try_cmpxchg64 __try_cmpxchg64
+
+#else
+
 /*
  * Building a kernel capable running on 80386 and 80486. It may be necessary
  * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
  */

-#define arch_cmpxchg64(ptr, o, n) \
-({ \
-        __typeof__(*(ptr)) __ret; \
-        __typeof__(*(ptr)) __old = (o); \
-        __typeof__(*(ptr)) __new = (n); \
-        alternative_io(LOCK_PREFIX_HERE \
-                        "call cmpxchg8b_emu", \
-                        "lock; cmpxchg8b (%%esi)" , \
-                       X86_FEATURE_CX8, \
-                       "=A" (__ret), \
-                       "S" ((ptr)), "0" (__old), \
-                       "b" ((unsigned int)__new), \
-                       "c" ((unsigned int)(__new>>32)) \
-                       : "memory"); \
-        __ret; })
+#define __arch_cmpxchg64_emu(_ptr, _old, _new) \
+({ \
+        union __u64_halves o = { .full = (_old), }, \
+                           n = { .full = (_new), }; \
+ \
+        asm volatile(ALTERNATIVE(LOCK_PREFIX_HERE \
+                                 "call cmpxchg8b_emu", \
+                                 "lock; cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+                     : [ptr] "+m" (*(_ptr)), \
+                       "+a" (o.low), "+d" (o.high) \
+                     : "b" (n.low), "c" (n.high), "S" (_ptr) \
+                     : "memory"); \
+ \
+        o.full; \
+})
+
+static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+{
+        return __arch_cmpxchg64_emu(ptr, old, new);
+}
+#define arch_cmpxchg64 arch_cmpxchg64

-#define arch_cmpxchg64_local(ptr, o, n) \
-({ \
-        __typeof__(*(ptr)) __ret; \
-        __typeof__(*(ptr)) __old = (o); \
-        __typeof__(*(ptr)) __new = (n); \
-        alternative_io("call cmpxchg8b_emu", \
-                       "cmpxchg8b (%%esi)" , \
-                       X86_FEATURE_CX8, \
-                       "=A" (__ret), \
-                       "S" ((ptr)), "0" (__old), \
-                       "b" ((unsigned int)__new), \
-                       "c" ((unsigned int)(__new>>32)) \
-                       : "memory"); \
-        __ret; })
+#define __arch_cmpxchg64_emu_local(_ptr, _old, _new) \
+({ \
+        union __u64_halves o = { .full = (_old), }, \
+                           n = { .full = (_new), }; \
+ \
+        asm volatile(ALTERNATIVE("call cmpxchg8b_emu", \
+                                 "cmpxchg8b %[ptr]", X86_FEATURE_CX8) \
+                     : [ptr] "+m" (*(_ptr)), \
+                       "+a" (o.low), "+d" (o.high) \
+                     : "b" (n.low), "c" (n.high), "S" (_ptr) \
+                     : "memory"); \
+ \
+        o.full; \
+})
+
+static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+{
+        return __arch_cmpxchg64_emu_local(ptr, old, new);
+}
+#define arch_cmpxchg64_local arch_cmpxchg64_local
+
 #endif
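
[Editor's note, not part of the commit] A side note on the __u64_halves union introduced above: cmpxchg8b compares and exchanges through the EDX:EAX and ECX:EBX register pairs, so the asm templates need each 64-bit operand split into 32-bit halves. The anonymous-struct union performs that split without shifts or casts. A stand-alone sketch of the idea, with a hypothetical demo name:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's __u64_halves: the anonymous struct aliases
 * the 32-bit halves of the 64-bit value. On little-endian x86, .low is
 * what lands in EAX/EBX and .high in EDX/ECX for cmpxchg8b. */
union u64_halves_demo {
        uint64_t full;
        struct {
                uint32_t low, high;
        };
};

int main(void)
{
        union u64_halves_demo v = { .full = 0x1122334455667788ULL };

        /* Prints low=0x55667788 high=0x11223344 on x86. */
        printf("low=%#x high=%#x\n", v.low, v.high);
        return 0;
}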