Commit 3e1b83ab authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: rdc: leds build/config fix
  x86: sysfs cpu?/topology is empty in 2.6.25 (32-bit Intel system)
  x86: revert commit 709f744f ("x86: bitops asm constraint fixes")
  x86: restrict keyboard io ports reservation to make ipmi driver work
  x86: fix fpu restore from sig return
  x86: remove spew print out about bus to node mapping
  x86: revert printk format warning change which is for linux-next
  x86: cleanup PAT cpu validation
  x86: geode: define geode_has_vsa2() even if CONFIG_MGEODE_LX is not set
  x86: GEODE: cache results from geode_has_vsa2() and uninline
  x86: revert geode config dependency
parents 8e3e076c 82fd8667
@@ -335,6 +335,7 @@ config X86_RDC321X
 	select GENERIC_GPIO
 	select LEDS_CLASS
 	select LEDS_GPIO
+	select NEW_LEDS
 	help
 	  This option is needed for RDC R-321x system-on-chip, also known
 	  as R-8610-(G).
@@ -1662,7 +1663,6 @@ config GEODE_MFGPT_TIMER
 config OLPC
 	bool "One Laptop Per Child support"
-	depends on MGEODE_LX
 	default n
 	help
 	  Add support for detecting the unique features of the OLPC
......
@@ -6,6 +6,7 @@
 #include <linux/cpu.h>

+#include <asm/pat.h>
 #include <asm/processor.h>

 struct cpuid_bit {
@@ -48,3 +49,23 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 			set_cpu_cap(c, cb->feature);
 	}
 }
+
+#ifdef CONFIG_X86_PAT
+void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
+{
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			return;
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			return;
+		break;
+	}
+
+	pat_disable(cpu_has_pat ?
+		"PAT disabled. Not yet verified on this CPU type." :
+		"PAT not supported by CPU.");
+}
+#endif
@@ -12,6 +12,7 @@
 #include <asm/mmu_context.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
+#include <asm/pat.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -308,19 +309,6 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 	}
-
-	clear_cpu_cap(c, X86_FEATURE_PAT);
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		if (c->x86 >= 0xf && c->x86 <= 0x11)
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	}
 }

 /*
@@ -409,18 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		init_scattered_cpuid_features(c);
 	}
-
-	clear_cpu_cap(c, X86_FEATURE_PAT);
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		if (c->x86 >= 0xf && c->x86 <= 0x11)
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-			set_cpu_cap(c, X86_FEATURE_PAT);
-		break;
-	}
 }

 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -651,6 +627,7 @@ void __init early_cpu_init(void)
 		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

 	early_cpu_detect();
+	validate_pat_support(&boot_cpu_data);
 }

 /* Make sure %fs is initialized properly in idle threads */
......
@@ -161,6 +161,25 @@ void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
 }
 EXPORT_SYMBOL_GPL(geode_gpio_setup_event);

+int geode_has_vsa2(void)
+{
+	static int has_vsa2 = -1;
+
+	if (has_vsa2 == -1) {
+		/*
+		 * The VSA has virtual registers that we can query for a
+		 * signature.
+		 */
+		outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+		outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+		has_vsa2 = (inw(VSA_VRC_DATA) == VSA_SIG);
+	}
+
+	return has_vsa2;
+}
+EXPORT_SYMBOL_GPL(geode_has_vsa2);
+
 static int __init geode_southbridge_init(void)
 {
 	if (!is_geode())
......
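The geode_has_vsa2() added above memoizes the port probe in a function-local static, so the VSA virtual-register ports are only touched on the first call. A minimal user-space sketch of the same cache-on-first-call pattern, with the hardware probe stubbed out (probe_signature() is a placeholder, not the real <asm/geode.h> port access):

#include <stdio.h>

/* Stand-in for the outw/inw signature probe in the kernel code above. */
static int probe_signature(void)
{
	puts("probing hardware (expensive, done once)");
	return 1;	/* pretend the signature matched */
}

static int has_vsa2(void)
{
	static int cached = -1;	/* -1 = not probed yet */

	if (cached == -1)
		cached = probe_signature();
	return cached;
}

int main(void)
{
	printf("first call:  %d\n", has_vsa2());	/* probes */
	printf("second call: %d\n", has_vsa2());	/* uses cached result */
	return 0;
}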
@@ -450,7 +450,6 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
 {
 	struct task_struct *tsk = current;

-	clear_fpu(tsk);
 	return __copy_from_user(&tsk->thread.xstate->fsave, buf,
 			       sizeof(struct i387_fsave_struct));
 }
@@ -461,7 +460,6 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 	struct user_i387_ia32_struct env;
 	int err;

-	clear_fpu(tsk);
 	err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
 			      sizeof(struct i387_fxsave_struct));
 	/* mxcsr reserved bits must be masked to zero for security reasons */
@@ -478,6 +476,16 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
 	int err;

 	if (HAVE_HWFP) {
+		struct task_struct *tsk = current;
+
+		clear_fpu(tsk);
+		if (!used_math()) {
+			err = init_fpu(tsk);
+			if (err)
+				return err;
+		}
+
 		if (cpu_has_fxsr)
 			err = restore_i387_fxsave(buf);
 		else
......
@@ -95,7 +95,7 @@ void __init setup_per_cpu_areas(void)
 	/* Copy section for each CPU (we discard the original) */
 	size = PERCPU_ENOUGH_ROOM;
-	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
 			  size);

 	for_each_possible_cpu(i) {
......
@@ -127,7 +127,12 @@ static struct resource standard_io_resources[] = { {
 }, {
 	.name	= "keyboard",
 	.start	= 0x0060,
-	.end	= 0x006f,
+	.end	= 0x0060,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "keyboard",
+	.start	= 0x0064,
+	.end	= 0x0064,
 	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
 }, {
 	.name	= "dma page reg",
......
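The hunk above (and the matching 64-bit change below) narrows the "keyboard" reservation from the full 0x60-0x6f range to just ports 0x60 and 0x64, so the I/O ports in between are no longer marked busy. BMC system interfaces of the KCS type are commonly decoded at 0x62/0x66, which is why the ipmi driver's own reservation used to fail. A hypothetical minimal module (not the real ipmi_si code) showing the kind of request_region() call this change unblocks:

#include <linux/module.h>
#include <linux/ioport.h>

static struct resource *kcs_res;

static int __init kcs_demo_init(void)
{
	/* Port 0x62 fell inside the old busy 0x60-0x6f "keyboard"
	 * reservation; with the split reservation it is free again. */
	kcs_res = request_region(0x62, 1, "kcs-demo");
	if (!kcs_res)
		return -EBUSY;
	return 0;
}

static void __exit kcs_demo_exit(void)
{
	release_region(0x62, 1);
}

module_init(kcs_demo_init);
module_exit(kcs_demo_exit);
MODULE_LICENSE("GPL");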
@@ -70,6 +70,7 @@
 #include <asm/ds.h>
 #include <asm/topology.h>
 #include <asm/trampoline.h>
+#include <asm/pat.h>

 #include <mach_apic.h>
 #ifdef CONFIG_PARAVIRT
@@ -128,7 +129,9 @@ static struct resource standard_io_resources[] = {
 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 	{ .name = "timer1", .start = 0x50, .end = 0x53,
 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
+	{ .name = "keyboard", .start = 0x60, .end = 0x60,
+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
+	{ .name = "keyboard", .start = 0x64, .end = 0x64,
 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
 		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
@@ -1063,25 +1066,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);

-	clear_cpu_cap(c, X86_FEATURE_PAT);
-
 	switch (c->x86_vendor) {
 	case X86_VENDOR_AMD:
 		early_init_amd(c);
-		if (c->x86 >= 0xf && c->x86 <= 0x11)
-			set_cpu_cap(c, X86_FEATURE_PAT);
 		break;
 	case X86_VENDOR_INTEL:
 		early_init_intel(c);
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
-			set_cpu_cap(c, X86_FEATURE_PAT);
 		break;
 	case X86_VENDOR_CENTAUR:
 		early_init_centaur(c);
 		break;
 	}
+
+	validate_pat_support(c);
 }

 /*
......
@@ -25,31 +25,24 @@
 #include <asm/mtrr.h>
 #include <asm/io.h>

-int pat_wc_enabled = 1;
+#ifdef CONFIG_X86_PAT
+int __read_mostly pat_wc_enabled = 1;

-static u64 __read_mostly boot_pat_state;
+void __init pat_disable(char *reason)
+{
+	pat_wc_enabled = 0;
+	printk(KERN_INFO "%s\n", reason);
+}

 static int nopat(char *str)
 {
-	pat_wc_enabled = 0;
-	printk(KERN_INFO "x86: PAT support disabled.\n");
-
+	pat_disable("PAT support disabled.");
 	return 0;
 }
 early_param("nopat", nopat);
+#endif

-static int pat_known_cpu(void)
-{
-	if (!pat_wc_enabled)
-		return 0;
-
-	if (cpu_has_pat)
-		return 1;
-
-	pat_wc_enabled = 0;
-	printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
-	return 0;
-}
+static u64 __read_mostly boot_pat_state;

 enum {
 	PAT_UC = 0,		/* uncached */
@@ -66,17 +59,19 @@ void pat_init(void)
 {
 	u64 pat;

-#ifndef CONFIG_X86_PAT
-	nopat(NULL);
-#endif
-
-	/* Boot CPU enables PAT based on CPU feature */
-	if (!smp_processor_id() && !pat_known_cpu())
-		return;
-
-	/* APs enable PAT iff boot CPU has enabled it before */
-	if (smp_processor_id() && !pat_wc_enabled)
+	if (!pat_wc_enabled)
 		return;

+	/* Paranoia check. */
+	if (!cpu_has_pat) {
+		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+		/*
+		 * Panic if this happens on the secondary CPU, and we
+		 * switched to PAT on the boot CPU. We have no way to
+		 * undo PAT.
+		 */
+		BUG_ON(boot_pat_state);
+	}
+
 	/* Set PWT to Write-Combining. All other bits stay the same */
 	/*
@@ -95,9 +90,8 @@ void pat_init(void)
 	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);

 	/* Boot CPU check */
-	if (!smp_processor_id()) {
+	if (!boot_pat_state)
 		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
-	}

 	wrmsrl(MSR_IA32_CR_PAT, pat);
 	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
......
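The PAT cleanup above concentrates the vendor/family whitelist in validate_pat_support() and routes every disable path through pat_disable(), with pat_init() only double-checking the CPU feature bit. A stand-alone user-space sketch of the same decision logic (the vendor strings and the pat_trusted() helper are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

/* Mirrors the checks in validate_pat_support(): AMD family 0xf-0x11,
 * and Intel family 0xf or family 6 with model >= 15, are the CPUs on
 * which PAT has been verified; everything else gets PAT disabled. */
static int pat_trusted(const char *vendor, int family, int model)
{
	if (!strcmp(vendor, "AuthenticAMD"))
		return family >= 0xf && family <= 0x11;
	if (!strcmp(vendor, "GenuineIntel"))
		return family == 0xf || (family == 6 && model >= 15);
	return 0;
}

int main(void)
{
	printf("Core 2 (6/15):    %s\n",
	       pat_trusted("GenuineIntel", 6, 15) ? "PAT kept" : "PAT disabled");
	printf("Pentium M (6/13): %s\n",
	       pat_trusted("GenuineIntel", 6, 13) ? "PAT kept" : "PAT disabled");
	printf("K8 (0xf):         %s\n",
	       pat_trusted("AuthenticAMD", 0xf, 0) ? "PAT kept" : "PAT disabled");
	return 0;
}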
@@ -504,14 +504,6 @@ static int __init early_fill_mp_bus_info(void)
 		}
 	}

-#ifdef CONFIG_NUMA
-	for (i = 0; i < BUS_NR; i++) {
-		node = mp_bus_to_node[i];
-		if (node >= 0)
-			printk(KERN_DEBUG "bus: %02x to node: %02x\n", i, node);
-	}
-#endif
-
 	for (i = 0; i < pci_root_num; i++) {
 		int res_num;
 		int busnum;
......
@@ -23,13 +23,10 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *)addr)
-#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
+#define ADDR "=m" (*(volatile long *) addr)
 #else
 #define ADDR "+m" (*(volatile long *) addr)
-#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
 #endif
-#define BASE_ADDR "m" (*(volatile int *)addr)

 /**
  * set_bit - Atomically set a bit in memory
@@ -77,7 +74,7 @@ static inline void __set_bit(int nr, volatile void *addr)
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
 }

 /*
@@ -96,7 +93,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
 static inline void __clear_bit(int nr, volatile void *addr)
 {
-	asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }

 /*
@@ -131,7 +128,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  */
 static inline void __change_bit(int nr, volatile void *addr)
 {
-	asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }

 /**
@@ -145,7 +142,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }

 /**
@@ -191,9 +188,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
 	int oldbit;

-	asm volatile("bts %2,%3\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm("bts %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
 	return oldbit;
 }

@@ -229,9 +227,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 {
 	int oldbit;

-	asm volatile("btr %2,%3\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btr %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr));
 	return oldbit;
 }

@@ -240,9 +239,10 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
 {
 	int oldbit;

-	asm volatile("btc %2,%3\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btc %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }

@@ -276,11 +276,10 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
 {
 	int oldbit;

-	asm volatile("bt %2,%3\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit)
-		     : "m" (((volatile const int *)addr)[nr >> 5]),
-		       "Ir" (nr), BASE_ADDR);
+	asm volatile("bt %2,%1\n\t"
+		     "sbb %0,%0"
+		     : "=r" (oldbit)
+		     : "m" (*(unsigned long *)addr), "Ir" (nr));
 	return oldbit;
 }

@@ -397,8 +396,6 @@ static inline int fls(int x)
 }
 #endif /* __KERNEL__ */

-#undef BASE_ADDR
-#undef BIT_ADDR
 #undef ADDR

 static inline void set_bit_string(unsigned long *bitmap,
......
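The bitops revert above drops the per-bit BIT_ADDR/BASE_ADDR operands and returns to a single read-write "+m" memory operand per bit operation. A small user-space program using the same restored operand pattern (demo_set_bit() is illustrative, not the kernel helper; nr is kept in a variable so gcc picks the register form of bts):

#include <stdio.h>

/* Same shape as the restored kernel asm: one "+m" memory operand
 * and the bit number passed via the "Ir" constraint. */
static inline void demo_set_bit(int nr, volatile void *addr)
{
	asm volatile("lock; bts %1,%0"
		     : "+m" (*(volatile long *) addr)
		     : "Ir" (nr)
		     : "memory");
}

int main(void)
{
	volatile int nr = 3;	/* runtime value, so gcc uses a register */
	long word = 0;

	demo_set_bit(nr, &word);
	printf("word = %#lx\n", word);	/* prints 0x8: bit 3 is set */
	return 0;
}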
@@ -185,16 +185,14 @@ static inline int is_geode(void)
 	return (is_geode_gx() || is_geode_lx());
 }

-/*
- * The VSA has virtual registers that we can query for a signature.
- */
+#ifdef CONFIG_MGEODE_LX
+extern int geode_has_vsa2(void);
+#else
 static inline int geode_has_vsa2(void)
 {
-	outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
-	outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
-
-	return (inw(VSA_VRC_DATA) == VSA_SIG);
+	return 0;
 }
+#endif

 /* MFGPTs */
......
@@ -175,7 +175,15 @@ static inline int save_i387(struct _fpstate __user *buf)
  */
 static inline int restore_i387(struct _fpstate __user *buf)
 {
-	set_used_math();
+	struct task_struct *tsk = current;
+	int err;
+
+	if (!used_math()) {
+		err = init_fpu(tsk);
+		if (err)
+			return err;
+	}
+
 	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
 		clts();
 		task_thread_info(current)->status |= TS_USEDFPU;
......
@@ -4,7 +4,13 @@

 #include <linux/types.h>

+#ifdef CONFIG_X86_PAT
 extern int pat_wc_enabled;
+extern void validate_pat_support(struct cpuinfo_x86 *c);
+#else
+static const int pat_wc_enabled = 0;
+static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
+#endif

 extern void pat_init(void);
@@ -12,5 +18,7 @@ extern int reserve_memtype(u64 start, u64 end,
 		unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);

+extern void pat_disable(char *reason);
+
 #endif
@@ -25,6 +25,16 @@
 #ifndef _ASM_X86_TOPOLOGY_H
 #define _ASM_X86_TOPOLOGY_H

+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_HT
+#  define ENABLE_TOPO_DEFINES
+# endif
+#else
+# ifdef CONFIG_SMP
+#  define ENABLE_TOPO_DEFINES
+# endif
+#endif
+
 #ifdef CONFIG_NUMA
 #include <linux/cpumask.h>
 #include <asm/mpspec.h>
@@ -130,10 +140,6 @@ extern unsigned long node_end_pfn[];
 extern unsigned long node_remap_size[];
 #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])

-# ifdef CONFIG_X86_HT
-#  define ENABLE_TOPO_DEFINES
-# endif
-
 # define SD_CACHE_NICE_TRIES	1
 # define SD_IDLE_IDX	1
 # define SD_NEWIDLE_IDX	2
@@ -141,10 +147,6 @@ extern unsigned long node_remap_size[];
 #else

-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-
 # define SD_CACHE_NICE_TRIES	2
 # define SD_IDLE_IDX	2
 # define SD_NEWIDLE_IDX	2
......
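Hoisting ENABLE_TOPO_DEFINES out of the CONFIG_NUMA block, as in the topology.h hunks above, is what brings /sys/devices/system/cpu/cpuN/topology back on 32-bit non-NUMA kernels (the empty-topology report in the merge summary). A quick user-space check, assuming sysfs is mounted at /sys:

#include <stdio.h>

static void show(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/topology/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%-24s <missing>\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-24s %s", name, buf);
	fclose(f);
}

int main(void)
{
	show("physical_package_id");
	show("core_id");
	show("thread_siblings");
	show("core_siblings");
	return 0;
}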