Commit f6e9ce2b authored by Thomas Gleixner, committed by Greg Kroah-Hartman

x86/irq: Reorganize the return path in assign_irq_vector

commit 433cbd57 upstream.

Use an explicit goto for the cases where we have success in the search/update
and return -ENOSPC if the search loop ends due to no space.

Preparatory patch for fixes. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Borislav Petkov <bp@alien8.de>
Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Jeremiah Mahler <jmmahler@gmail.com>
Cc: andy.shevchenko@gmail.com
Cc: Guenter Roeck <linux@roeck-us.net>
Link: http://lkml.kernel.org/r/20151231160106.403491024@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cf2e82af
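To illustrate the restructuring, here is a small standalone sketch (ordinary userspace C, not the kernel code itself) of the pattern this patch switches to: instead of carrying an err flag through the search loop and testing it afterwards, the loop jumps straight to a success label and the fall-through path returns -ENOSPC. All names in the sketch (find_slot_old, find_slot_new, NR_SLOTS, slot_busy) are invented for illustration only.

/*
 * Sketch of the return-path change: err-flag style vs. explicit goto.
 * Compile with: cc -Wall sketch.c
 */
#include <stdio.h>
#include <errno.h>

#define NR_SLOTS 8

static int slot_busy[NR_SLOTS] = { 1, 1, 0, 1, 0, 0, 1, 1 };

/* Old style: error code carried in a local variable across the loop. */
static int find_slot_old(void)
{
	int i, err = -ENOSPC;

	for (i = 0; i < NR_SLOTS; i++) {
		if (!slot_busy[i]) {
			err = 0;
			break;
		}
	}
	if (!err) {
		/* work that must only happen on success */
		slot_busy[i] = 1;
		err = i;
	}
	return err;
}

/* New style (what this patch does): goto on success, -ENOSPC on exhaustion. */
static int find_slot_new(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		if (!slot_busy[i])
			goto success;
	}
	return -ENOSPC;

success:
	/* work that must only happen on success */
	slot_busy[i] = 1;
	return i;
}

int main(void)
{
	printf("old: %d, new: %d\n", find_slot_old(), find_slot_new());
	return 0;
}

In the actual patch, the work done only on success is the apic->cpu_mask_to_apicid_and() call that caches the destination APIC IDs into d->cfg.dest_apicid; it now sits under the success: label, as the diff below shows.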
@@ -116,13 +116,12 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
 	static int current_offset = VECTOR_OFFSET_START % 16;
-	int cpu, err;
+	int cpu;
 
 	if (d->move_in_progress)
 		return -EBUSY;
 
 	/* Only try and allocate irqs on cpus that are present */
-	err = -ENOSPC;
 	cpumask_clear(d->old_domain);
 	cpumask_clear(searched_cpumask);
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -132,9 +131,8 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 		apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
 		if (cpumask_subset(vector_cpumask, d->domain)) {
-			err = 0;
 			if (cpumask_equal(vector_cpumask, d->domain))
-				break;
+				goto success;
			/*
 			 * New cpumask using the vector is a proper subset of
 			 * the current in use mask. So cleanup the vector
@@ -145,7 +143,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 			d->move_in_progress =
 			   cpumask_intersects(d->old_domain, cpu_online_mask);
 			cpumask_and(d->domain, d->domain, vector_cpumask);
-			break;
+			goto success;
 		}
 
 		vector = current_vector;
@@ -185,17 +183,13 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
 		d->cfg.vector = vector;
 		cpumask_copy(d->domain, vector_cpumask);
-		err = 0;
-		break;
+		goto success;
 	}
+	return -ENOSPC;
 
-	if (!err) {
-		/* cache destination APIC IDs into cfg->dest_apicid */
-		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-						   &d->cfg.dest_apicid);
-	}
-
-	return err;
+success:
+	/* cache destination APIC IDs into cfg->dest_apicid */
+	return apic->cpu_mask_to_apicid_and(mask, d->domain, &d->cfg.dest_apicid);
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,