Commit a0db36ed authored by Linus Torvalds

Merge tag 'irq-urgent-2024-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Ingo Molnar:

 - Fix x86 IRQ vector leak caused by a CPU offlining race

 - Fix build failure in the riscv-imsic irqchip driver
   caused by an API-change semantic conflict

 - Fix use-after-free in irq_find_at_or_after()

* tag 'irq-urgent-2024-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/irqdesc: Prevent use-after-free in irq_find_at_or_after()
  genirq/cpuhotplug, x86/vector: Prevent vector leak during CPU offline
  irqchip/riscv-imsic: Fixup riscv_ipi_set_virq_range() conflict
parents 3a390f24 b84a8aba
@@ -1035,7 +1035,8 @@ static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
 			add_timer_on(&cl->timer, cpu);
 		}
 	} else {
-		apicd->prev_vector = 0;
+		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
+		free_moved_vector(apicd);
 	}
 	raw_spin_unlock(&vector_lock);
 }
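
When the CPU that was supposed to run the deferred cleanup is already offline, merely clearing apicd->prev_vector forgets the vector without ever returning it to the vector matrix, which is the leak this hunk closes by calling free_moved_vector() immediately. The sketch below is only a rough illustration of the release that has to happen on this path, assuming the irq_matrix_free() allocator interface and the apicd fields used elsewhere in vector.c; it is not the upstream free_moved_vector() body, and the helper name is made up for this example.

/*
 * Illustrative sketch only: roughly what releasing the stale vector
 * involves. release_previous_vector() is a hypothetical name; the
 * real code uses free_moved_vector() in arch/x86/kernel/apic/vector.c.
 */
static void release_previous_vector(struct apic_chip_data *apicd)
{
	/* Give the vector on the previous CPU back to the allocator ... */
	irq_matrix_free(vector_matrix, apicd->prev_cpu, apicd->prev_vector,
			apicd->is_managed);
	/* ... and only then drop the bookkeeping, so nothing is leaked. */
	apicd->prev_vector = 0;
	apicd->prev_cpu = 0;
}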
@@ -1072,6 +1073,7 @@ void irq_complete_move(struct irq_cfg *cfg)
  */
 void irq_force_complete_move(struct irq_desc *desc)
 {
+	unsigned int cpu = smp_processor_id();
 	struct apic_chip_data *apicd;
 	struct irq_data *irqd;
 	unsigned int vector;
@@ -1096,10 +1098,11 @@ void irq_force_complete_move(struct irq_desc *desc)
 		goto unlock;
 
 	/*
-	 * If prev_vector is empty, no action required.
+	 * If prev_vector is empty or the descriptor is neither currently
+	 * nor previously on the outgoing CPU no action required.
 	 */
 	vector = apicd->prev_vector;
-	if (!vector)
+	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
 		goto unlock;
 
 	/*
@@ -69,6 +69,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		return false;
 	}
 
+	/*
+	 * Complete an eventually pending irq move cleanup. If this
+	 * interrupt was moved in hard irq context, then the vectors need
+	 * to be cleaned up. It can't wait until this interrupt actually
+	 * happens and this CPU was involved.
+	 */
+	irq_force_complete_move(desc);
+
 	/*
 	 * No move required, if:
 	 *  - Interrupt is per cpu
@@ -87,14 +95,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		return false;
 	}
 
-	/*
-	 * Complete an eventually pending irq move cleanup. If this
-	 * interrupt was moved in hard irq context, then the vectors need
-	 * to be cleaned up. It can't wait until this interrupt actually
-	 * happens and this CPU was involved.
-	 */
-	irq_force_complete_move(desc);
-
 	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
@@ -160,7 +160,10 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
 static unsigned int irq_find_at_or_after(unsigned int offset)
 {
 	unsigned long index = offset;
-	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
+	struct irq_desc *desc;
+
+	guard(rcu)();
+	desc = mt_find(&sparse_irqs, &index, nr_irqs);
 
 	return desc ? irq_desc_get_irq(desc) : nr_irqs;
 }
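
The guard(rcu)() line uses the kernel's scope-based cleanup helpers: it takes the RCU read lock and drops it automatically when the function returns, so mt_find() can no longer race with maple tree nodes being freed. As a rough illustration, the open-coded equivalent would look like the sketch below, assuming the same sparse_irqs/nr_irqs context from irqdesc.c.

/* Sketch of the equivalent open-coded form, for illustration only. */
static unsigned int irq_find_at_or_after(unsigned int offset)
{
	unsigned long index = offset;
	struct irq_desc *desc;
	unsigned int irq;

	rcu_read_lock();	/* keep the maple tree nodes alive across the walk */
	desc = mt_find(&sparse_irqs, &index, nr_irqs);
	irq = desc ? irq_desc_get_irq(desc) : nr_irqs;
	rcu_read_unlock();

	return irq;
}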