Commit 497c9a19 authored by Yinghai Lu, committed by Ingo Molnar

x86: make 32bit support per_cpu vector

so we can merge io_apic_32.c and io_apic_64.c

v2: Use cpu_online_map as the target cpus for bigsmp, just as 64-bit does.

Also remove the now-unused TARGET_CPUS macros.

v3: We need to check whether desc is NULL in smp_irq_move_cleanup.

Migration also needs to reset the vector, so copy __target_IO_APIC_irq
over from 64-bit.

(the duplication will go away once the two files are unified.)
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 199751d7
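
The io_apic_32.c rework itself (apparently the collapsed diff below) is not shown, so here is a hedged sketch of the v3 NULL check, modeled on the 64-bit smp_irq_move_cleanup_interrupt of this era. Everything beyond the desc check is an assumption about that code, not the literal hunk:

/*
 * Sketch only (assumption: modeled on the 64-bit
 * smp_irq_move_cleanup_interrupt of this era). The v3 fix is the NULL
 * check on desc: a vector in the per-cpu table may map to irq -1, or
 * to an irq without a descriptor, in which case irq_to_desc() returns
 * NULL and must not be dereferenced.
 */
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __get_cpu_var(vector_irq)[vector];

		desc = irq_to_desc(irq);
		if (!desc)			/* the v3 fix */
			continue;

		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		/* still the active vector on a cpu in the domain: keep it */
		if (vector == cfg->vector && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}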
......@@ -629,7 +629,7 @@ ENTRY(interrupt)
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
 vector=0
-.rept NR_IRQS
+.rept NR_VECTORS
 	ALIGN
 	.if vector
 	CFI_ADJUST_CFA_OFFSET	-4
......
This diff is collapsed.
......@@ -223,21 +223,25 @@ unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int overflow, irq = ~regs->orig_ax;
+	int overflow;
+	unsigned vector = ~regs->orig_ax;
 	struct irq_desc *desc;
+	unsigned irq;
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc)) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__func__, irq);
-		BUG();
-	}
 	old_regs = set_irq_regs(regs);
 	irq_enter();
+	irq = __get_cpu_var(vector_irq)[vector];
 
 	overflow = check_stack_overflow();
 
+	desc = irq_to_desc(irq);
+	if (unlikely(!desc)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x\n",
+			__func__, irq, vector);
+		BUG();
+	}
+
 	if (!execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
......
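
For orientation: the value in orig_ax comes from the entry stubs, which push the bitwise complement of the vector number (that is the "high bit used in ret_from_ code"), and the new per-cpu vector_irq table turns it back into an irq. A hypothetical standalone rendering of the lookup that do_IRQ now open-codes:

/* Hypothetical helper for illustration only; do_IRQ above does this
 * inline. The entry stub pushed ~vector into orig_ax, so complementing
 * orig_ax recovers the vector; the per-cpu vector_irq table then yields
 * the irq bound to that vector on this CPU, or -1 if it is unassigned. */
static unsigned int vector_to_irq(struct pt_regs *regs)
{
	unsigned vector = ~regs->orig_ax;

	return __get_cpu_var(vector_irq)[vector];
}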
......@@ -90,6 +90,27 @@ static struct irqaction irq2 = {
 	.name = "cascade",
 };
 
+DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+	[0 ... IRQ0_VECTOR - 1] = -1,
+	[IRQ0_VECTOR] = 0,
+	[IRQ1_VECTOR] = 1,
+	[IRQ2_VECTOR] = 2,
+	[IRQ3_VECTOR] = 3,
+	[IRQ4_VECTOR] = 4,
+	[IRQ5_VECTOR] = 5,
+	[IRQ6_VECTOR] = 6,
+	[IRQ7_VECTOR] = 7,
+	[IRQ8_VECTOR] = 8,
+	[IRQ9_VECTOR] = 9,
+	[IRQ10_VECTOR] = 10,
+	[IRQ11_VECTOR] = 11,
+	[IRQ12_VECTOR] = 12,
+	[IRQ13_VECTOR] = 13,
+	[IRQ14_VECTOR] = 14,
+	[IRQ15_VECTOR] = 15,
+	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+};
+
 /* Overridden in paravirt.c */
 void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
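
The initializer above pins the 16 ISA irqs to their fixed vectors and leaves every other vector unassigned. A small hypothetical self-check makes the intended invariant explicit (IRQ0_VECTOR through IRQ15_VECTOR are consecutive, per the irq_vectors.h hunk further down):

/* Hypothetical sanity check, not part of the commit: ISA irq n sits at
 * vector IRQ0_VECTOR + n on the boot CPU, and everything else is -1. */
static void __init check_isa_vector_map(void)
{
	int n;

	for (n = 0; n < 16; n++)
		BUG_ON(per_cpu(vector_irq, 0)[IRQ0_VECTOR + n] != n);
	BUG_ON(per_cpu(vector_irq, 0)[IRQ15_VECTOR + 1] != -1);
}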
......@@ -105,22 +126,14 @@ void __init native_init_IRQ(void)
 	 * us. (some of these will be overridden and become
 	 * 'special' SMP interrupts)
 	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (i >= nr_irqs)
-			break;
+	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* SYSCALL_VECTOR was reserved in trap_init. */
-		if (!test_bit(vector, used_vectors))
-			set_intr_gate(vector, interrupt[i]);
+		if (i != SYSCALL_VECTOR)
+			set_intr_gate(i, interrupt[i]);
 	}
 
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
-	/*
-	 * IRQ0 must be given a fixed assignment and initialized,
-	 * because it's used before the IO-APIC is set up.
-	 */
-	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
-
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
 	/*
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
......@@ -135,6 +148,9 @@ void __init native_init_IRQ(void)
 	/* IPI for single call function */
 	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
 
+	/* Low priority IPI to cleanup after moving an irq */
+	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
......@@ -168,3 +184,4 @@ void __init native_init_IRQ(void)
irq_ctx_init(smp_processor_id());
}
......@@ -582,7 +582,7 @@ static void __init lguest_init_IRQ(void)
 	for (i = 0; i < LGUEST_IRQS; i++) {
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (vector != SYSCALL_VECTOR) {
-			set_intr_gate(vector, interrupt[i]);
+			set_intr_gate(vector, interrupt[vector]);
 			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
 						      handle_level_irq,
 						      "level");
......
......@@ -41,6 +41,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ }
 };
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return cpumask_of_cpu(cpu);
+}
+
 static int probe_bigsmp(void)
 {
......
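
vector_allocation_domain is the hook the vector allocator consults: it returns the set of CPUs that must all have a given vector free before it can be assigned, since the APIC may deliver the interrupt to any CPU in the destination. bigsmp runs in physical destination mode, so one CPU per domain suffices; the lowest-priority subarchitectures below return all CPUs. A hedged sketch of how an allocator consumes the domain, modeled loosely on the 64-bit __assign_irq_vector (function name and details here are illustrative assumptions):

/* Illustrative sketch only, loosely modeled on the 64-bit allocator.
 * A vector is usable for an irq only if it is free on every CPU in the
 * domain returned by vector_allocation_domain(). */
static int assign_vector_sketch(int irq, cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask) {
		cpumask_t domain = vector_allocation_domain(cpu);
		int vector;

		for (vector = FIRST_DEVICE_VECTOR; vector < NR_VECTORS; vector++) {
			int new_cpu, busy = 0;

			/* the vector must be free on the whole domain */
			for_each_cpu_mask(new_cpu, domain)
				if (per_cpu(vector_irq, new_cpu)[vector] != -1)
					busy = 1;
			if (busy)
				continue;

			/* claim it on every CPU in the domain */
			for_each_cpu_mask(new_cpu, domain)
				per_cpu(vector_irq, new_cpu)[vector] = irq;
			return vector;
		}
	}
	return -ENOSPC;
}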
......@@ -75,4 +75,18 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 }
 #endif
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
......@@ -38,4 +38,18 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
......@@ -23,4 +23,18 @@ static int probe_summit(void)
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
......@@ -9,22 +9,17 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-/* Round robin the irqs among the online cpus */
 static inline cpumask_t target_cpus(void)
 {
-	static unsigned long cpu = NR_CPUS;
-	do {
-		if (cpu >= NR_CPUS)
-			cpu = first_cpu(cpu_online_map);
-		else
-			cpu = next_cpu(cpu, cpu_online_map);
-	} while (cpu >= NR_CPUS);
-	return cpumask_of_cpu(cpu);
+#ifdef CONFIG_SMP
+	return cpu_online_map;
+#else
+	return cpumask_of_cpu(0);
+#endif
 }
 
 #undef APIC_DEST_LOGICAL
 #define APIC_DEST_LOGICAL	0
-#define TARGET_CPUS		(target_cpus())
 #define APIC_DFR_VALUE		(APIC_DFR_FLAT)
 #define INT_DELIVERY_MODE	(dest_Fixed)
 #define INT_DEST_MODE		(0)	/* phys delivery to target proc */
......
......@@ -17,7 +17,6 @@ static inline cpumask_t target_cpus(void)
 	return cpumask_of_cpu(smp_processor_id());
 #endif
 }
-#define TARGET_CPUS	(target_cpus())
 
 #if defined CONFIG_ES7000_CLUSTERED_APIC
 #define APIC_DFR_VALUE		(APIC_DFR_CLUSTER)
......@@ -81,7 +80,7 @@ static inline void setup_apic_routing(void)
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
......
......@@ -57,6 +57,7 @@ struct genapic {
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
 	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	cpumask_t (*vector_allocation_domain)(int cpu);
 
 #ifdef CONFIG_SMP
 	/* ipi */
......@@ -104,6 +105,7 @@ struct genapic {
 	APICFUNC(get_apic_id)				\
 	.apic_id_mask = APIC_ID_MASK,			\
 	APICFUNC(cpu_mask_to_apicid)			\
+	APICFUNC(vector_allocation_domain)		\
 	APICFUNC(acpi_madt_oem_check)			\
 	IPIFUNC(send_IPI_mask)				\
 	IPIFUNC(send_IPI_allbutself)			\
......
......@@ -116,12 +116,12 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #ifdef CONFIG_X86_32
 extern void (*const interrupt[NR_IRQS])(void);
-#else
+#endif
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
-#endif
 
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+#ifdef CONFIG_X86_IO_APIC
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
 extern void __setup_vector_irq(int cpu);
......
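
With these declarations now visible to 32-bit, a CPU coming online can populate its own copy of vector_irq under the vector lock, the same way 64-bit does. A minimal sketch of the expected call pattern; only the three declarations above are from the commit, and the wrapper name is illustrative:

/* Sketch of the bringup usage (setup_vector_irq here is hypothetical).
 * __setup_vector_irq() fills this CPU's vector_irq[] from the per-irq
 * configuration, and the lock keeps that consistent with concurrent
 * vector (re)assignment on other CPUs. */
static void setup_vector_irq(int cpu)
{
	lock_vector_lock();
	__setup_vector_irq(cpu);
	unlock_vector_lock();
}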
......@@ -19,19 +19,14 @@
 /*
  * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration on 64 bit.
+ * cleanup after irq migration.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR	FIRST_EXTERNAL_VECTOR
 
 /*
- * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
- * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
+ * Vectors 0x30-0x3f are used for ISA interrupts.
  */
-#ifdef CONFIG_X86_32
-#define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR)
-#else
 #define IRQ0_VECTOR		(FIRST_EXTERNAL_VECTOR + 0x10)
-#endif
 #define IRQ1_VECTOR		(IRQ0_VECTOR + 1)
 #define IRQ2_VECTOR		(IRQ0_VECTOR + 2)
 #define IRQ3_VECTOR		(IRQ0_VECTOR + 3)
......@@ -96,11 +91,7 @@
  * start at 0x31(0x41) to spread out vectors evenly between priority
  * levels. (0x80 is the syscall vector)
  */
-#ifdef CONFIG_X86_32
-# define FIRST_DEVICE_VECTOR	0x31
-#else
-# define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
-#endif
+#define FIRST_DEVICE_VECTOR	(IRQ15_VECTOR + 2)
 
 #define NR_VECTORS		256
......
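
Putting the unified definitions together (FIRST_EXTERNAL_VECTOR is 0x20 and SYSCALL_VECTOR is 0x80 on 32-bit), the vector layout after this change works out to:

/* Resulting 32-bit vector layout, derived from the macros above:
 *
 *  0x20          IRQ_MOVE_CLEANUP_VECTOR (= FIRST_EXTERNAL_VECTOR)
 *  0x30 - 0x3f   IRQ0_VECTOR .. IRQ15_VECTOR (ISA interrupts)
 *  0x41 - 0xff   device vectors; FIRST_DEVICE_VECTOR = IRQ15_VECTOR + 2
 *  0x80          SYSCALL_VECTOR (int 0x80; its gate is skipped above)
 *                NR_VECTORS = 256
 */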
......@@ -14,6 +14,7 @@ BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 #endif
 
 /*
......
......@@ -85,6 +85,20 @@ static inline int apicid_to_node(int logical_apicid)
 	return 0;
 #endif
 }
 
+static inline cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
 #endif
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
......@@ -138,6 +152,5 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 static inline void enable_apic_mode(void)
 {
 }
-
 #endif /* CONFIG_X86_LOCAL_APIC */
 #endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */
......@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
......
......@@ -12,8 +12,6 @@ static inline cpumask_t target_cpus(void)
 	return CPU_MASK_ALL;
 }
 
-#define TARGET_CPUS (target_cpus())
-
 #define NO_BALANCE_IRQ (1)
 #define esr_disable (1)
......
......@@ -22,7 +22,6 @@ static inline cpumask_t target_cpus(void)
 	 */
 	return cpumask_of_cpu(0);
 }
-#define TARGET_CPUS	(target_cpus())
 
 #define INT_DELIVERY_MODE	(dest_LowestPrio)
 #define INT_DEST_MODE		1	/* logical delivery broadcast to all procs */
......