Commit 51b14c76 authored by Christoph Hellwig, committed by Linus Torvalds

[PATCH] sanitize intel movsl selection

Using #ifdef <actual cpu selection> is very bad style; we usually introduce
feature CONFIG_ options in config.in instead.
parent 1d4123ec
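
To see the pattern change at a glance, here is a condensed before/after
sketch in C, assembled from fragments of the hunks below (the elided
declarations stand in for whatever the option guards):

/* Before: a header derives the feature directly from the CPU selection. */
#if defined(CONFIG_M586MMX) || defined(CONFIG_M686) || \
        defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUM4)
#define INTEL_MOVSL
#endif

#ifdef INTEL_MOVSL
/* ... movsl-specific declarations ... */
#endif

/* After: config.in emits the feature option once per CPU type, e.g.
 * "define_bool CONFIG_X86_INTEL_USERCOPY y", and C code tests only
 * the feature option, never the CPU types themselves. */
#ifdef CONFIG_X86_INTEL_USERCOPY
/* ... movsl-specific declarations ... */
#endif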
@@ -83,6 +83,7 @@ if [ "$CONFIG_M586MMX" = "y" ]; then
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_PPRO_FENCE y
    define_bool CONFIG_X86_F00F_BUG y
+   define_bool CONFIG_X86_INTEL_USERCOPY y
 fi
 if [ "$CONFIG_M686" = "y" ]; then
    define_int CONFIG_X86_L1_CACHE_SHIFT 5
@@ -96,12 +97,14 @@ if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
    define_bool CONFIG_X86_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
+   define_bool CONFIG_X86_INTEL_USERCOPY y
 fi
 if [ "$CONFIG_MPENTIUM4" = "y" ]; then
    define_int CONFIG_X86_L1_CACHE_SHIFT 7
    define_bool CONFIG_X86_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
+   define_bool CONFIG_X86_INTEL_USERCOPY y
 fi
 if [ "$CONFIG_MK6" = "y" ]; then
    define_int CONFIG_X86_L1_CACHE_SHIFT 5
......
@@ -14,9 +14,11 @@ static int disable_x86_serial_nr __initdata = 1;
 static int disable_P4_HT __initdata = 0;
 extern int trap_init_f00f_bug(void);

-#ifdef INTEL_MOVSL
-struct movsl_mask movsl_mask;   /* alignment at which movsl is preferred for
-                                   bulk memory copies */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask;
 #endif

 /*
@@ -355,7 +357,7 @@ static void __init init_intel(struct cpuinfo_x86 *c)
        /* Work around errata */
        Intel_errata_workarounds(c);

-#ifdef INTEL_MOVSL
+#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
@@ -372,7 +374,6 @@ static void __init init_intel(struct cpuinfo_x86 *c)
                break;
        }
 #endif
-
 }
......
@@ -9,21 +9,14 @@
 #include <asm/uaccess.h>
 #include <asm/mmx.h>

-#ifdef INTEL_MOVSL
 static inline int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
 {
-       if (n < 64)
-               return 1;
-       if ((((const long)a1 ^ (const long)a2) & movsl_mask.mask) == 0)
-               return 1;
-       return 0;
-}
-#else
-static inline int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
-{
+#ifdef CONFIG_X86_INTEL_USERCOPY
+       if (n >= 64 && (((const long)a1 ^ (const long)a2) & movsl_mask.mask))
+               return 0;
+#endif
        return 1;
 }
-#endif

 /*
  * Copy a null terminated string from userspace.
@@ -151,8 +144,7 @@ long strnlen_user(const char *s, long n)
        return res & mask;
 }

-#ifdef INTEL_MOVSL
-
+#ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
 __copy_user_intel(void *to, const void *from,unsigned long size)
 {
@@ -334,8 +326,7 @@ __copy_user_zeroing_intel(void *to, const void *from, unsigned long size)
                   : "eax", "edx", "memory");
        return size;
 }
-
-#else /* INTEL_MOVSL */
+#else
 /*
  * Leave these declared but undefined. They should not be any references to
  * them
@@ -344,8 +335,7 @@
 unsigned long
 __copy_user_zeroing_intel(void *to, const void *from, unsigned long size);
 unsigned long
 __copy_user_intel(void *to, const void *from,unsigned long size);
-#endif /* INTEL_MOVSL */
-
+#endif /* CONFIG_X86_INTEL_USERCOPY */
 /* Generic arbitrary sized copy. */
 #define __copy_user(to,from,size) \
......
@@ -36,12 +36,7 @@
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
-#if defined(CONFIG_M586MMX) || defined(CONFIG_M686) || \
-       defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUM4)
-#define INTEL_MOVSL
-#endif
-
-#ifdef INTEL_MOVSL
+#ifdef CONFIG_X86_INTEL_USERCOPY
 extern struct movsl_mask {
        int mask;
 } ____cacheline_aligned_in_smp movsl_mask;
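
The rewritten movsl_is_ok() above is compact enough that a standalone
illustration may help. Below is a hypothetical userspace sketch of the same
test; the mask value 7 is an assumption (it matches the 8-byte-alignment
comment in the hunk above, while the real per-CPU values are assigned in
init_intel() in a part of the diff that is collapsed here):

#include <stdio.h>

/* Stand-in for movsl_mask.mask: 7 compares the low three address bits,
 * i.e. it demands equal alignment within an 8-byte granule. */
static const long mask = 7;

static int movsl_is_ok(const void *a1, const void *a2, unsigned long n)
{
        /* Long copies need matching alignment; short copies always qualify. */
        if (n >= 64 && (((const long)a1 ^ (const long)a2) & mask))
                return 0;
        return 1;
}

int main(void)
{
        char buf[256];

        printf("%d\n", movsl_is_ok(buf, buf + 8, 128)); /* 1: low bits match */
        printf("%d\n", movsl_is_ok(buf, buf + 3, 128)); /* 0: long and misaligned */
        printf("%d\n", movsl_is_ok(buf, buf + 3, 32));  /* 1: short copy */
        return 0;
}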
......