Commit f0cc6cca authored by Thomas Gleixner

x86/vector: Simplify the CPU hotplug vector update

With single CPU affinities it's no longer required to scan all interrupts
for potential destination masks which contain the newly booting CPU.

Reduce it to installing the active legacy PIC vectors on the newly booting
CPU, as those cannot be affinity controlled by the kernel and can end up at
any CPU in the system.
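
The per-vector decision this change introduces can be illustrated with a small
standalone sketch. The vector base, the legacy IRQ count and the io_apic_irqs
bitmap below are mock stand-ins for the kernel's ISA_IRQ_VECTOR(0),
nr_legacy_irqs() and io_apic_irqs, so this is a simplified model rather than
the actual implementation:

	#include <stdio.h>

	#define NR_VECTORS		256
	#define ISA_IRQ_VECTOR_BASE	0x30	/* mock: first vector of the ISA range */
	#define NR_LEGACY_IRQS		16	/* mock: stand-in for nr_legacy_irqs() */

	/* mock: bit set => the legacy irq has been taken over by the IOAPIC */
	static unsigned long io_apic_irqs_mock;

	/* Returns the ISA irq to install for this vector, or -1 for "leave unused". */
	static int legacy_irq_for_vector(int vector)
	{
		int isairq = vector - ISA_IRQ_VECTOR_BASE;

		if (isairq < 0 || isairq >= NR_LEGACY_IRQS)
			return -1;		/* outside the legacy vector space */
		if (io_apic_irqs_mock & (1UL << isairq))
			return -1;		/* handled by the IOAPIC, not the PIC */
		return isairq;
	}

	int main(void)
	{
		io_apic_irqs_mock = 1UL << 4;	/* pretend legacy irq 4 moved to the IOAPIC */

		for (int vector = 0; vector < NR_VECTORS; vector++) {
			int irq = legacy_irq_for_vector(vector);

			if (irq >= 0)
				printf("vector 0x%02x -> legacy irq %d\n", vector, irq);
		}
		return 0;
	}

Only the handful of PIC-handled legacy vectors produce an entry; every other
vector is left unused, which is exactly the reduction the changelog describes.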
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213154.388040204@linutronix.de
parent ef9e56d8
@@ -459,33 +459,32 @@ int __init arch_early_irq_init(void)
 	return arch_early_ioapic_init();
 }
 
-/* Initialize vector_irq on a new cpu */
-static void __setup_vector_irq(int cpu)
+/* Temporary hack to keep things working */
+static void vector_update_shutdown_irqs(void)
 {
-	struct apic_chip_data *data;
 	struct irq_desc *desc;
-	int irq, vector;
+	int irq;
 
-	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
-		struct irq_data *idata = irq_desc_get_irq_data(desc);
+		struct irq_data *irqd = irq_desc_get_irq_data(desc);
+		struct apic_chip_data *ad = apic_chip_data(irqd);
 
-		data = apic_chip_data(idata);
-		if (!data || !cpumask_test_cpu(cpu, data->domain))
-			continue;
-		vector = data->cfg.vector;
-		per_cpu(vector_irq, cpu)[vector] = desc;
+		if (ad && cpumask_test_cpu(cpu, ad->domain) && ad->cfg.vector)
+			this_cpu_write(vector_irq[ad->cfg.vector], desc);
 	}
-	/* Mark the free vectors */
-	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		desc = per_cpu(vector_irq, cpu)[vector];
-		if (IS_ERR_OR_NULL(desc))
-			continue;
-
-		data = apic_chip_data(irq_desc_get_irq_data(desc));
-		if (!cpumask_test_cpu(cpu, data->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
-	}
+}
+
+static struct irq_desc *__setup_vector_irq(int vector)
+{
+	int isairq = vector - ISA_IRQ_VECTOR(0);
+
+	/* Check whether the irq is in the legacy space */
+	if (isairq < 0 || isairq >= nr_legacy_irqs())
+		return VECTOR_UNUSED;
+	/* Check whether the irq is handled by the IOAPIC */
+	if (test_bit(isairq, &io_apic_irqs))
+		return VECTOR_UNUSED;
+	return irq_to_desc(isairq);
 }
 
 /*
@@ -493,20 +492,27 @@ static void __setup_vector_irq(int cpu)
  */
 void setup_vector_irq(int cpu)
 {
-	int irq;
+	unsigned int vector;
 
 	lockdep_assert_held(&vector_lock);
 
 	/*
-	 * On most of the platforms, legacy PIC delivers the interrupts on the
-	 * boot cpu. But there are certain platforms where PIC interrupts are
-	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
-	 * legacy PIC, for the new cpu that is coming online, setup the static
-	 * legacy vector to irq mapping:
+	 * The interrupt affinity logic never targets interrupts to offline
+	 * CPUs. The exception are the legacy PIC interrupts. In general
+	 * they are only targeted to CPU0, but depending on the platform
+	 * they can be distributed to any online CPU in hardware. The
+	 * kernel has no influence on that. So all active legacy vectors
+	 * must be installed on all CPUs. All non legacy interrupts can be
+	 * cleared.
 	 */
-	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
-
-	__setup_vector_irq(cpu);
+	for (vector = 0; vector < NR_VECTORS; vector++)
+		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
+
+	/*
+	 * Until the rewrite of the managed interrupt management is in
+	 * place it's necessary to walk the irq descriptors and check for
+	 * interrupts which are targeted at this CPU.
+	 */
+	vector_update_shutdown_irqs();
 }
 
 static int apic_retrigger_irq(struct irq_data *irq_data)
...
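
To make the comment in setup_vector_irq() concrete: every CPU that might
receive a legacy PIC vector needs a vector_irq entry because interrupt entry
resolves the arriving vector number through the local CPU's table. The
following userspace sketch is a simplified model of that lookup, not the
kernel's actual entry code; all names and types are mocked for illustration:

	#include <stdio.h>

	#define NR_VECTORS 256

	struct mock_irq_desc {
		int irq;
	};

	/* one table per CPU in the real kernel; here just a single CPU's view */
	static struct mock_irq_desc *vector_irq_mock[NR_VECTORS];

	static void handle_vector(int vector)
	{
		struct mock_irq_desc *desc = vector_irq_mock[vector];

		if (desc)
			printf("vector 0x%02x -> handle irq %d\n", vector, desc->irq);
		else
			printf("vector 0x%02x -> no descriptor installed on this CPU\n", vector);
	}

	int main(void)
	{
		static struct mock_irq_desc timer = { 0 };

		/* what setup_vector_irq() does for an active legacy PIC vector */
		vector_irq_mock[0x30] = &timer;

		handle_vector(0x30);	/* resolves to the installed descriptor */
		handle_vector(0x31);	/* nothing installed for this vector */
		return 0;
	}

A missing entry means the interrupt cannot be routed to its descriptor, which
is why the active legacy vectors are installed on every newly booting CPU
while all other vectors can safely stay cleared.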