Commit 5ca5a885 authored by Zwane Mwaikambo, committed by Linus Torvalds

[PATCH] bluesmoke merge

This patch merges in all the currently outstanding bluesmoke bits from
2.5-dj into 2.5.20; it also has the pleasant side effect of fixing
compilation. Test-compiled with and without MCE.
parent c8e2aa59
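The capability probe added at the end of the bluesmoke.c diff below keys off IA32_MCG_CAP: bit 9 (MCG_EXT_P) advertises the P4/Xeon extended register state, and bits 16-23 carry the count of extended MSRs. A minimal standalone sketch of that decoding (hypothetical helper names, not part of the patch):

#include <stdint.h>

/* Decode the IA32_MCG_CAP low word the way the intel_mcheck_init() hunk
 * below does: bit 9 is MCG_EXT_P, bits 16-23 are the extended-MSR count. */
static inline int mcg_has_extended_state(uint32_t mcg_cap_lo)
{
	return (mcg_cap_lo >> 9) & 1;		/* MCG_EXT_P */
}

static inline unsigned int mcg_extended_msr_count(uint32_t mcg_cap_lo)
{
	return (mcg_cap_lo >> 16) & 0xff;	/* matches (l >> 16) & 0xff */
}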
@@ -18,8 +18,24 @@
 
 #ifdef CONFIG_X86_MCE
 
+/* as supported by the P4/Xeon family */
+struct intel_mce_extended_msrs {
+	u32 eax;
+	u32 ebx;
+	u32 ecx;
+	u32 edx;
+	u32 esi;
+	u32 edi;
+	u32 ebp;
+	u32 esp;
+	u32 eflags;
+	u32 eip;
+	/* u32 *reserved[]; */
+};
+
 static int mce_disabled __initdata = 0;
+static int mce_num_extended_msrs = 0;
 
 static int banks;
@@ -75,47 +91,73 @@ static void __init intel_init_thermal(struct cpuinfo_x86 *c)
 	if (!cpu_has(c, X86_FEATURE_ACC))
 		return; /* -ENODEV */
 
-	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	/* first check if its enabled already, in which case there might
 	 * be some SMM goo which handles it, so we can't even put a handler
 	 * since it might be delivered via SMI already -zwanem.
 	 */
-	if (l & (1<<3)) {
-		printk(KERN_DEBUG "CPU#%d: Thermal monitoring already enabled\n", cpu);
-	} else {
-		wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
-		printk(KERN_INFO "CPU#%d: Thermal monitoring enabled\n", cpu);
+	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+	h = apic_read(APIC_LVTTHMR);
+	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
+		printk(KERN_DEBUG "CPU#%d: Thermal monitoring handled by SMI\n", cpu);
+		return; /* -EBUSY */
 	}
 
-	/* check whether a vector already exists */
-	l = apic_read(APIC_LVTTHMR);
-	if (l & 0xff) {
-		printk(KERN_DEBUG "CPU#%d: Thermal LVT already handled\n", cpu);
+	/* check whether a vector already exists, temporarily masked? */
+	if (h & APIC_VECTOR_MASK) {
+		printk(KERN_DEBUG "CPU#%d: Thermal LVT vector (%#x) already installed\n",
+			cpu, (h & APIC_VECTOR_MASK));
 		return; /* -EBUSY */
 	}
 
-	wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
-	printk(KERN_INFO "CPU#%d: Thermal monitoring enabled\n", cpu);
-
 	/* The temperature transition interrupt handler setup */
-	l = THERMAL_APIC_VECTOR;	/* our delivery vector */
-	l |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
-	apic_write_around(APIC_LVTTHMR, l);
+	h = THERMAL_APIC_VECTOR;	/* our delivery vector */
+	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
+	apic_write_around(APIC_LVTTHMR, h);
 
 	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
-	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x3 , h);
+	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
 
 	/* ok we're good to go... */
 	vendor_thermal_interrupt = intel_thermal_interrupt;
 
+	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
+
 	l = apic_read(APIC_LVTTHMR);
 	apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+	printk(KERN_INFO "CPU#%d: Thermal monitoring enabled\n", cpu);
 	return;
 }
 #endif /* CONFIG_X86_MCE_P4THERMAL */
 
+/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
+static int inline intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
+{
+	u32 h;
+
+	if (mce_num_extended_msrs == 0)
+		goto done;
+
+	rdmsr(MSR_IA32_MCG_EAX, r->eax, h);
+	rdmsr(MSR_IA32_MCG_EBX, r->ebx, h);
+	rdmsr(MSR_IA32_MCG_ECX, r->ecx, h);
+	rdmsr(MSR_IA32_MCG_EDX, r->edx, h);
+	rdmsr(MSR_IA32_MCG_ESI, r->esi, h);
+	rdmsr(MSR_IA32_MCG_EDI, r->edi, h);
+	rdmsr(MSR_IA32_MCG_EBP, r->ebp, h);
+	rdmsr(MSR_IA32_MCG_ESP, r->esp, h);
+	rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h);
+	rdmsr(MSR_IA32_MCG_EIP, r->eip, h);
+	/* can we rely on kmalloc to do a dynamic
+	 * allocation for the reserved registers?
+	 */
+done:
+	return mce_num_extended_msrs;
+}
+
 /*
  * Machine Check Handler For PII/PIII
  */
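Distilled from the hunk above, the new bring-up order is: bail out if SMM already owns thermal monitoring (IA32_MISC_ENABLE bit 3 set and the thermal LVT configured for SMI delivery), bail out if an LVT vector is already installed, program the thermal LVT masked, set the two transition-interrupt enable bits (l | 0x03) in IA32_THERM_INTERRUPT, hook the vendor handler, re-read IA32_MISC_ENABLE and set bit 3, and only then unmask the LVT. The fresh rdmsr before the final wrmsr also appears to fix a bug visible in the removed lines, where IA32_MISC_ENABLE was written back using l after l had been reloaded from APIC_LVTTHMR.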
@@ -126,6 +168,7 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
 	u32 alow, ahigh, high, low;
 	u32 mcgstl, mcgsth;
 	int i;
+	struct intel_mce_extended_msrs dbg;
 
 	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 	if(mcgstl&(1<<0))	/* Recoverable ? */
@@ -133,6 +176,15 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
 	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", smp_processor_id(), mcgsth, mcgstl);
 
+	if (intel_get_extended_msrs(&dbg)) {
+		printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
+			smp_processor_id(), dbg.eip, dbg.eflags);
+		printk(KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
+			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
+		printk(KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
+			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
+	}
+
 	for (i=0;i<banks;i++) {
 		rdmsr(MSR_IA32_MC0_STATUS+i*4,low, high);
 		if(high&(1<<31)) {
@@ -334,11 +386,24 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
 	set_in_cr4(X86_CR4_MCE);
 	printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", smp_processor_id());
 
+	/*
+	 * Check for P4/Xeon specific MCE extensions
+	 */
+	if (c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 15) {
+		/* Check for P4/Xeon extended MCE MSRs */
+		rdmsr(MSR_IA32_MCG_CAP, l, h);
+		if (l & (1<<9))	{/* MCG_EXT_P */
+			mce_num_extended_msrs = (l >> 16) & 0xff;
+			printk(KERN_INFO "CPU#%d: Intel P4/Xeon Extended MCE MSRs (%d) available\n",
+				smp_processor_id(), mce_num_extended_msrs);
+		}
+
 #ifdef CONFIG_X86_MCE_P4THERMAL
-	/* Only enable thermal throttling warning on Pentium 4. */
-	if (c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 15)
+		/* Check for P4/Xeon Thermal monitor */
 		intel_init_thermal(c);
 #endif
+	}
+
 	done=1;
 }
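For context on the unchanged loop in intel_machine_check() above: each MCA bank occupies four consecutive MSRs (CTL, STATUS, ADDR, MISC), so rdmsr(MSR_IA32_MC0_STATUS+i*4, ...) walks the STATUS registers bank by bank, and bit 31 of the high word (MCi_STATUS.VAL) marks a bank holding a valid logged error. A standalone sketch of that addressing (illustration only, not part of the patch):

#include <stdint.h>

#define MSR_IA32_MC0_STATUS	0x401	/* first bank's STATUS register */

/* Bank i's STATUS MSR: banks are spaced 4 MSRs apart (CTL/STATUS/ADDR/MISC). */
static inline uint32_t mc_status_msr(int bank)
{
	return MSR_IA32_MC0_STATUS + bank * 4;
}

/* MCi_STATUS bit 63 (bit 31 of the high word) is VAL: error logged. */
static inline int mc_status_valid(uint32_t status_hi)
{
	return (status_hi >> 31) & 1;
}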
...
@@ -28,9 +28,6 @@ extern int irq_vector[NR_IRQS];
 extern void (*interrupt[NR_IRQS])(void);
 
-extern asmlinkage void thermal_interrupt(void);
-extern asmlinkage void smp_thermal_interrupt(struct pt_regs);
-
 #ifdef CONFIG_SMP
 extern asmlinkage void reschedule_interrupt(void);
 extern asmlinkage void invalidate_interrupt(void);
@@ -41,6 +38,7 @@ extern asmlinkage void call_function_interrupt(void);
 extern asmlinkage void apic_timer_interrupt(void);
 extern asmlinkage void error_interrupt(void);
 extern asmlinkage void spurious_interrupt(void);
+extern asmlinkage void thermal_interrupt(struct pt_regs);
 #endif
 
 extern void mask_irq(unsigned int irq);
...
@@ -57,8 +57,21 @@
 #define MSR_IA32_MCG_STATUS 0x17a
 #define MSR_IA32_MCG_CTL 0x17b
 
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x180
+#define MSR_IA32_MCG_EBX 0x181
+#define MSR_IA32_MCG_ECX 0x182
+#define MSR_IA32_MCG_EDX 0x183
+#define MSR_IA32_MCG_ESI 0x184
+#define MSR_IA32_MCG_EDI 0x185
+#define MSR_IA32_MCG_EBP 0x186
+#define MSR_IA32_MCG_ESP 0x187
+#define MSR_IA32_MCG_EFLAGS 0x188
+#define MSR_IA32_MCG_EIP 0x189
+#define MSR_IA32_MCG_RESERVED 0x18A
+
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
 
 #define MSR_IA32_THERM_CONTROL 0x19a
 #define MSR_IA32_THERM_INTERRUPT 0x19b
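One thing the new constants make visible: MSR_IA32_MCG_EBP (0x186) and MSR_IA32_MCG_ESP (0x187) share their addresses with MSR_P6_EVNTSEL0/1, which is why both groups remain defined under the /* P4/Xeon+ specific */ comment; the same MSR numbers select performance events on P6-family parts but hold extended machine-check state on the P4/Xeon. A compile-time restatement of the overlap (illustration only):

#define MSR_IA32_MCG_EBP	0x186	/* P4/Xeon extended MCE state */
#define MSR_IA32_MCG_ESP	0x187
#define MSR_P6_EVNTSEL0		0x186	/* P6 performance event selects */
#define MSR_P6_EVNTSEL1		0x187

/* Negative-array-size trick: compilation fails if these ever diverge. */
typedef char mcg_ebp_is_evntsel0[(MSR_IA32_MCG_EBP == MSR_P6_EVNTSEL0) ? 1 : -1];
typedef char mcg_esp_is_evntsel1[(MSR_IA32_MCG_ESP == MSR_P6_EVNTSEL1) ? 1 : -1];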
...