Commit 647802d6 authored by Linus Torvalds
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: APIC: enable workaround on AMD Fam10h CPUs
  xen: disable interrupts before saving in percpu
  x86: add x86@kernel.org to MAINTAINERS
  x86: push old stack address on irqstack for unwinder
  irq, x86: fix lock status with numa_migrate_irq_desc
  x86: add cache descriptors for Intel Core i7
  x86/Voyager: make it build and boot
parents 3e561f97 bb960a1e
@@ -4841,6 +4841,7 @@ P: Ingo Molnar
 M:	mingo@redhat.com
 P:	H. Peter Anvin
 M:	hpa@zytor.com
+M:	x86@kernel.org
 L:	linux-kernel@vger.kernel.org
 T:	git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
 S:	Maintained

@@ -1436,7 +1436,7 @@ static int __init detect_init_APIC(void)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-		    (boot_cpu_data.x86 == 15))
+		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
 	case X86_VENDOR_INTEL:

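Fam10h parts report x86 family 0x10 (decimal 16), so the old equality test matched only K8 (family 0xf) and newer AMD CPUs fell through to no_apic. A quick standalone check of the predicate change (an illustrative sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* AMD families: K7 is 6, K8 is 0xf (15), Fam10h is 0x10 (16) */
	unsigned int families[] = { 6, 15, 16, 17 };

	for (int i = 0; i < 4; i++) {
		unsigned int f = families[i];
		printf("family %2u: old (== 15) -> %d, new (>= 15) -> %d\n",
		       f, f == 15, f >= 15);
	}
	return 0;
}
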
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */

@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };

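For context, the descriptor bytes this table is indexed by come from CPUID leaf 2, which packs one-byte descriptors into EAX through EDX. A minimal user-space dumper (an illustrative sketch assuming GCC/Clang on x86, not the kernel's decoder):

#include <stdio.h>

static void cpuid2(unsigned int r[4])
{
	__asm__ volatile("cpuid"
			 : "=a"(r[0]), "=b"(r[1]), "=c"(r[2]), "=d"(r[3])
			 : "a"(2));
}

int main(void)
{
	unsigned int regs[4];

	cpuid2(regs);
	for (int i = 0; i < 4; i++) {
		/* a set top bit means the register holds no descriptors */
		if (regs[i] & 0x80000000u)
			continue;
		/* AL is an iteration count, not a descriptor: skip it */
		for (int b = (i == 0 ? 1 : 0); b < 4; b++)
			printf("descriptor 0x%02x\n", (regs[i] >> (8 * b)) & 0xff);
	}
	return 0;
}
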
@@ -346,6 +346,7 @@ ENTRY(save_args)
 	popq_cfi %rax			/* move return address... */
 	mov %gs:pda_irqstackptr,%rsp
 	EMPTY_FRAME 0
+	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
 	 * We entered an interrupt context - irqs are off:

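The new push stores the interrupted frame's %rbp at the base of the irqstack frame, so a frame-pointer unwinder can step from the IRQ stack back onto the task stack. A user-space sketch of that walk (hypothetical types, not the kernel unwinder):

#include <stdio.h>

struct frame {
	struct frame *rbp;	/* saved frame pointer: the backlink */
	unsigned long rip;	/* return address sitting above it */
};

static void walk(const struct frame *f)
{
	while (f) {
		printf("  ip = %#lx\n", f->rip);
		f = f->rbp;	/* step to the older frame */
	}
}

int main(void)
{
	/* a fake task-stack frame, then an IRQ-stack frame linking to it */
	struct frame task_frame = { NULL, 0x1111 };
	struct frame irq_frame  = { &task_frame, 0x2222 };

	walk(&irq_frame);	/* crosses the stack switch via the backlink */
	return 0;
}
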
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-	*descp = desc = move_irq_desc(desc, me);
-	/* get the new one */
-	cfg = desc->chip_data;
+		*descp = desc = move_irq_desc(desc, me);
+		/* get the new one */
+		cfg = desc->chip_data;
 #endif
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
+	}
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}

@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }

-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,

@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif

-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */

@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
 	init_ISA_irqs();
 }

+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *

@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
 		if (x86_quirks->arch_intr_init())
 			return;
 	}
+
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
 }

 /**

@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
 	setup_irq(2, &irq2);
 }

-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
 	/* Voyagers run their CPUs from independent clocks, so disable
 	 * the TSC code because we can't sync them */
 	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }

+void __init pre_setup_arch_hook(void)
+{
+	voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+	voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }

@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);

@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;

 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */

@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */

-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;

-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;

-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;

@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();

-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
 }

 static int __cpuinit voyager_cpu_up(unsigned int cpu)

@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }

-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }

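Several of these Voyager changes are fallout from the generic cpumask rework: passing cpumask_t by value copies a NR_CPUS-sized bitmap onto the stack on every call, so the APIs were converted to take const struct cpumask *. A standalone sketch of the size difference (illustrative numbers, not the kernel's definitions):

#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096

struct cpumask { unsigned long bits[NR_CPUS / (8 * sizeof(long))]; };

/* old style: the whole 512-byte struct is copied per call */
static int count_by_value(struct cpumask m) { (void)m; return 0; }

/* new style: only a pointer is passed */
static int count_by_ref(const struct cpumask *m) { (void)m; return 0; }

int main(void)
{
	struct cpumask m;

	memset(&m, 0, sizeof(m));
	printf("sizeof(struct cpumask) = %zu bytes\n", sizeof(m));
	count_by_value(m);	/* copies sizeof(m) bytes */
	count_by_ref(&m);	/* copies one pointer */
	return 0;
}
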
@@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
    paired with xen_mc_issue() */
 static inline void xen_mc_batch(void)
 {
+	unsigned long flags;
+
 	/* need to disable interrupts until this entry is complete */
-	local_irq_save(__get_cpu_var(xen_mc_irq_flags));
+	local_irq_save(flags);
+	__get_cpu_var(xen_mc_irq_flags) = flags;
 }

 static inline struct multicall_space xen_mc_entry(size_t args)

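The bug being fixed: local_irq_save() stores into its lvalue before interrupts are actually off, so writing straight into __get_cpu_var(xen_mc_irq_flags) can resolve and write the per-CPU slot while the task is still migratable. A toy user-space model of the ordering problem (hypothetical helpers, not Xen code):

#include <stdio.h>

static unsigned long per_cpu_flags[2];	/* one flags slot per "CPU" */
static int current_cpu;			/* the CPU the task runs on */

static unsigned long save_flags_and_disable(void)
{
	return 0x200;			/* pretend IF was set */
}

int main(void)
{
	/* buggy order: resolve the per-CPU address first ... */
	unsigned long *slot = &per_cpu_flags[current_cpu];
	current_cpu = 1;		  /* ... then the task migrates ... */
	*slot = save_flags_and_disable(); /* ... and CPU 0's slot is written */

	/* fixed order: save into a local, then store; once interrupts are
	 * off the task can no longer migrate between the two steps */
	unsigned long flags = save_flags_and_disable();
	per_cpu_flags[current_cpu] = flags;

	printf("cpu0 = %#lx, cpu1 = %#lx\n",
	       per_cpu_flags[0], per_cpu_flags[1]);
	return 0;
}
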
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);

 	irq_desc_ptrs[irq] = desc;
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);

 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
+	spin_unlock(&old_desc->lock);
 	kfree(old_desc);
+	spin_lock(&desc->lock);
+
+	return desc;

 out_unlock:
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);

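The fix makes __real_move_irq_desc honor a lock handoff: the caller arrives holding the old descriptor's lock and must return holding the new one's, and sparse_irq_lock is dropped before the old descriptor is freed rather than held across the free. A user-space analogue of that pattern with pthreads (an illustrative sketch under those assumptions, not the kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	pthread_mutex_t lock;
	int data;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *registry_slot;

/* called with old->lock held; returns with the new desc's lock held */
static struct desc *move_desc(struct desc *old)
{
	struct desc *new = malloc(sizeof(*new));

	pthread_mutex_lock(&registry_lock);
	pthread_mutex_init(&new->lock, NULL);
	new->data = old->data;		/* copy state across */
	registry_slot = new;		/* publish the replacement */
	pthread_mutex_unlock(&registry_lock);	/* drop registry lock early */

	pthread_mutex_unlock(&old->lock);	/* release before freeing */
	pthread_mutex_destroy(&old->lock);
	free(old);

	pthread_mutex_lock(&new->lock);	/* caller still expects a locked desc */
	return new;
}

int main(void)
{
	struct desc *d = malloc(sizeof(*d));

	pthread_mutex_init(&d->lock, NULL);
	d->data = 42;

	pthread_mutex_lock(&d->lock);
	d = move_desc(d);		/* comes back locked, on the new desc */
	printf("data = %d\n", d->data);
	pthread_mutex_unlock(&d->lock);
	free(d);
	return 0;
}
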