Commit 3716fd27 authored by Thomas Gleixner's avatar Thomas Gleixner

x86/irq: Check vector allocation early

__assign_irq_vector() uses the vector_cpumask which is assigned by
apic->vector_allocation_domain() without doing basic sanity checks. That can
result in a situation where the final assignment of a newly found vector
fails in apic->cpu_mask_to_apicid_and(). So we have to do rollbacks for no
reason.

apic->cpu_mask_to_apicid_and() only fails if 

  vector_cpumask & requested_cpumask & cpu_online_mask 

is empty.

Check for this condition right away and if the result is empty try immediately
the next possible cpu in the requested mask. So in case of a failure the old
setting is unchanged and we can remove the rollback code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Borislav Petkov <bp@alien8.de>
Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Jeremiah Mahler <jmmahler@gmail.com>
Cc: andy.shevchenko@gmail.com
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: stable@vger.kernel.org #4.3+
Link: http://lkml.kernel.org/r/20151231160106.561877324@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 95ffeb4b
...@@ -31,7 +31,7 @@ struct apic_chip_data { ...@@ -31,7 +31,7 @@ struct apic_chip_data {
struct irq_domain *x86_vector_domain; struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain); EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock); static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, searched_cpumask; static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller; static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
...@@ -130,8 +130,20 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, ...@@ -130,8 +130,20 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
while (cpu < nr_cpu_ids) { while (cpu < nr_cpu_ids) {
int new_cpu, vector, offset; int new_cpu, vector, offset;
/* Get the possible target cpus for @mask/@cpu from the apic */
apic->vector_allocation_domain(cpu, vector_cpumask, mask); apic->vector_allocation_domain(cpu, vector_cpumask, mask);
/*
* Clear the offline cpus from @vector_cpumask for searching
* and verify whether the result overlaps with @mask. If true,
* then the call to apic->cpu_mask_to_apicid_and() will
* succeed as well. If not, no point in trying to find a
* vector in this mask.
*/
cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
if (!cpumask_intersects(vector_searchmask, mask))
goto next_cpu;
if (cpumask_subset(vector_cpumask, d->domain)) { if (cpumask_subset(vector_cpumask, d->domain)) {
if (cpumask_equal(vector_cpumask, d->domain)) if (cpumask_equal(vector_cpumask, d->domain))
goto success; goto success;
...@@ -164,7 +176,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, ...@@ -164,7 +176,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
if (test_bit(vector, used_vectors)) if (test_bit(vector, used_vectors))
goto next; goto next;
for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { for_each_cpu(new_cpu, vector_searchmask) {
if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
goto next; goto next;
} }
...@@ -176,7 +188,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, ...@@ -176,7 +188,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
d->move_in_progress = d->move_in_progress =
cpumask_intersects(d->old_domain, cpu_online_mask); cpumask_intersects(d->old_domain, cpu_online_mask);
} }
for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) for_each_cpu(new_cpu, vector_searchmask)
per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
d->cfg.vector = vector; d->cfg.vector = vector;
cpumask_copy(d->domain, vector_cpumask); cpumask_copy(d->domain, vector_cpumask);
...@@ -198,8 +210,14 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, ...@@ -198,8 +210,14 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
return -ENOSPC; return -ENOSPC;
success: success:
/* cache destination APIC IDs into cfg->dest_apicid */ /*
return apic->cpu_mask_to_apicid_and(mask, d->domain, &d->cfg.dest_apicid); * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
* as we already established, that mask & d->domain & cpu_online_mask
* is not empty.
*/
BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
&d->cfg.dest_apicid));
return 0;
} }
static int assign_irq_vector(int irq, struct apic_chip_data *data, static int assign_irq_vector(int irq, struct apic_chip_data *data,
...@@ -409,6 +427,7 @@ int __init arch_early_irq_init(void) ...@@ -409,6 +427,7 @@ int __init arch_early_irq_init(void)
arch_init_htirq_domain(x86_vector_domain); arch_init_htirq_domain(x86_vector_domain);
BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
return arch_early_ioapic_init(); return arch_early_ioapic_init();
...@@ -498,14 +517,7 @@ static int apic_set_affinity(struct irq_data *irq_data, ...@@ -498,14 +517,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
return -EINVAL; return -EINVAL;
err = assign_irq_vector(irq, data, dest); err = assign_irq_vector(irq, data, dest);
if (err) { return err ? err : IRQ_SET_MASK_OK;
if (assign_irq_vector(irq, data,
irq_data_get_affinity_mask(irq_data)))
pr_err("Failed to recover vector for irq %d\n", irq);
return err;
}
return IRQ_SET_MASK_OK;
} }
static struct irq_chip lapic_controller = { static struct irq_chip lapic_controller = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment