Commit b08ee5f7 authored by Borislav Petkov's avatar Borislav Petkov Committed by H. Peter Anvin

x86: Simplify __HAVE_ARCH_CMPXCHG tests

Both the 32-bit and 64-bit cmpxchg.h header define __HAVE_ARCH_CMPXCHG
and there's ifdeffery which checks it. But since both bitnesses define it,
we can just as well move it up to the main cmpxchg header and simplify a
bit of code in doing that.
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/20140711104338.GB17083@pd.tnic
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 89171579
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */ #include <asm/alternative.h> /* Provides LOCK_PREFIX */
#define __HAVE_ARCH_CMPXCHG 1
/* /*
* Non-existant functions to indicate usage errors at link time * Non-existant functions to indicate usage errors at link time
* (or compile-time if the compiler implements __compiletime_error(). * (or compile-time if the compiler implements __compiletime_error().
...@@ -143,7 +145,6 @@ extern void __add_wrong_size(void) ...@@ -143,7 +145,6 @@ extern void __add_wrong_size(void)
# include <asm/cmpxchg_64.h> # include <asm/cmpxchg_64.h>
#endif #endif
#ifdef __HAVE_ARCH_CMPXCHG
#define cmpxchg(ptr, old, new) \ #define cmpxchg(ptr, old, new) \
__cmpxchg(ptr, old, new, sizeof(*(ptr))) __cmpxchg(ptr, old, new, sizeof(*(ptr)))
...@@ -152,7 +153,6 @@ extern void __add_wrong_size(void) ...@@ -152,7 +153,6 @@ extern void __add_wrong_size(void)
#define cmpxchg_local(ptr, old, new) \ #define cmpxchg_local(ptr, old, new) \
__cmpxchg_local(ptr, old, new, sizeof(*(ptr))) __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
#endif
/* /*
* xadd() adds "inc" to "*ptr" and atomically returns the previous * xadd() adds "inc" to "*ptr" and atomically returns the previous
......
...@@ -34,8 +34,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value) ...@@ -34,8 +34,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
: "memory"); : "memory");
} }
#define __HAVE_ARCH_CMPXCHG 1
#ifdef CONFIG_X86_CMPXCHG64 #ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \ #define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
......
...@@ -6,8 +6,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val) ...@@ -6,8 +6,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
*ptr = val; *ptr = val;
} }
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg64(ptr, o, n) \ #define cmpxchg64(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
#endif #endif
#if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG) #if defined(CONFIG_X86_32)
/* /*
* This lock provides nmi access to the CMOS/RTC registers. It has some * This lock provides nmi access to the CMOS/RTC registers. It has some
* special properties. It is owned by a CPU and stores the index register * special properties. It is owned by a CPU and stores the index register
......
...@@ -100,23 +100,11 @@ do { \ ...@@ -100,23 +100,11 @@ do { \
static inline int __mutex_fastpath_trylock(atomic_t *count, static inline int __mutex_fastpath_trylock(atomic_t *count,
int (*fail_fn)(atomic_t *)) int (*fail_fn)(atomic_t *))
{ {
/* /* cmpxchg because it never induces a false contention state. */
* We have two variants here. The cmpxchg based one is the best one
* because it never induce a false contention state. It is included
* here because architectures using the inc/dec algorithms over the
* xchg ones are much more likely to support cmpxchg natively.
*
* If not we fall back to the spinlock based variant - that is
* just as efficient (and simpler) as a 'destructive' probing of
* the mutex state would be.
*/
#ifdef __HAVE_ARCH_CMPXCHG
if (likely(atomic_cmpxchg(count, 1, 0) == 1)) if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1; return 1;
return 0; return 0;
#else
return fail_fn(count);
#endif
} }
#endif /* _ASM_X86_MUTEX_32_H */ #endif /* _ASM_X86_MUTEX_32_H */
...@@ -74,10 +74,6 @@ int acpi_fix_pin2_polarity __initdata; ...@@ -74,10 +74,6 @@ int acpi_fix_pin2_polarity __initdata;
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif #endif
#ifndef __HAVE_ARCH_CMPXCHG
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
Boot-time Configuration Boot-time Configuration
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment