Commit a782a7e4 authored by Thomas Gleixner

x86/irq: Store irq descriptor in vector array

We can spare the irq_desc lookup in the interrupt entry code if we
store the descriptor pointer in the vector array instead of the
interrupt number.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.717724106@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f61ae4fb
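
To make the intent of the change easier to see, here is a stand-alone user-space
sketch (every type, variable and function in it is a local toy definition for
illustration, not the kernel API) contrasting the two schemes: the old per-CPU
table maps vector -> irq number and the entry path pays an extra
irq-to-descriptor lookup, while the new table stores the descriptor pointer and
the entry path just dereferences it.

/*
 * Stand-alone user-space sketch of the idea behind this commit; all names
 * below are toy definitions, not the kernel API.
 */
#include <stdio.h>

#define NR_VECTORS 256
#define NR_IRQS    16

struct irq_desc {
        unsigned int irq;
        void (*handler)(struct irq_desc *desc);
};

static struct irq_desc demo_descs[NR_IRQS];           /* stand-in for the irq_desc store */

static int vector_to_irq[NR_VECTORS];                 /* old: vector -> irq number */
static struct irq_desc *vector_to_desc[NR_VECTORS];   /* new: vector -> descriptor */

static struct irq_desc *toy_irq_to_desc(int irq)
{
        return (irq >= 0 && irq < NR_IRQS) ? &demo_descs[irq] : NULL;
}

static void demo_handler(struct irq_desc *desc)
{
        printf("handled irq %u\n", desc->irq);
}

/* Entry path before the change: read the irq number, then look up the descriptor. */
static void do_IRQ_old(unsigned int vector)
{
        int irq = vector_to_irq[vector];
        struct irq_desc *desc = toy_irq_to_desc(irq);  /* extra lookup on every interrupt */

        if (desc && desc->handler)
                desc->handler(desc);
}

/* Entry path after the change: the per-CPU table already holds the descriptor. */
static void do_IRQ_new(unsigned int vector)
{
        struct irq_desc *desc = vector_to_desc[vector];

        if (desc && desc->handler)
                desc->handler(desc);
}

int main(void)
{
        unsigned int vector = 0x30;

        demo_descs[5].irq = 5;
        demo_descs[5].handler = demo_handler;

        vector_to_irq[vector] = 5;                     /* old: store the number */
        vector_to_desc[vector] = toy_irq_to_desc(5);   /* new: store the descriptor */

        do_IRQ_old(vector);
        do_IRQ_new(vector);
        return 0;
}

Because do_IRQ() runs for every external interrupt, dropping the irq_to_desc()
step removes work from the hottest path. The sentinel values change accordingly
in the patch below: VECTOR_UNUSED becomes NULL and VECTOR_RETRIGGERED becomes
((void *)~0UL), so a single IS_ERR_OR_NULL() test filters both special cases.
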
@@ -182,10 +182,10 @@ extern char irq_entries_start[];
 #define trace_irq_entries_start irq_entries_start
 #endif
 
-#define VECTOR_UNUSED (-1)
-#define VECTOR_RETRIGGERED (-2)
+#define VECTOR_UNUSED NULL
+#define VECTOR_RETRIGGERED ((void *)~0UL)
 
-typedef int vector_irq_t[NR_VECTORS];
+typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 
 #endif /* !ASSEMBLY_ */

@@ -36,7 +36,9 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+struct irq_desc;
+extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);

@@ -169,7 +169,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
                         goto next;
 
                 for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
-                        if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNUSED)
+                        if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                 goto next;
                 }
                 /* Found one! */
@@ -181,7 +181,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
                                 cpumask_intersects(d->old_domain, cpu_online_mask);
                 }
                 for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
-                        per_cpu(vector_irq, new_cpu)[vector] = irq;
+                        per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
                 d->cfg.vector = vector;
                 cpumask_copy(d->domain, vector_cpumask);
                 err = 0;
@@ -223,8 +223,9 @@ static int assign_irq_vector_policy(int irq, int node,
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
-        int cpu, vector;
+        struct irq_desc *desc;
         unsigned long flags;
+        int cpu, vector;
 
         raw_spin_lock_irqsave(&vector_lock, flags);
         BUG_ON(!data->cfg.vector);
@@ -241,10 +242,11 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
                 return;
         }
 
+        desc = irq_to_desc(irq);
         for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
                 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                      vector++) {
-                        if (per_cpu(vector_irq, cpu)[vector] != irq)
+                        if (per_cpu(vector_irq, cpu)[vector] != desc)
                                 continue;
                         per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
                         break;
@@ -402,30 +404,30 @@ int __init arch_early_irq_init(void)
         return arch_early_ioapic_init();
 }
 
+/* Initialize vector_irq on a new cpu */
 static void __setup_vector_irq(int cpu)
 {
-        /* Initialize vector_irq on a new cpu */
-        int irq, vector;
         struct apic_chip_data *data;
+        struct irq_desc *desc;
+        int irq, vector;
 
         /* Mark the inuse vectors */
-        for_each_active_irq(irq) {
-                data = apic_chip_data(irq_get_irq_data(irq));
-                if (!data)
-                        continue;
+        for_each_irq_desc(irq, desc) {
+                struct irq_data *idata = irq_desc_get_irq_data(desc);
 
-                if (!cpumask_test_cpu(cpu, data->domain))
+                data = apic_chip_data(idata);
+                if (!data || !cpumask_test_cpu(cpu, data->domain))
                         continue;
                 vector = data->cfg.vector;
-                per_cpu(vector_irq, cpu)[vector] = irq;
+                per_cpu(vector_irq, cpu)[vector] = desc;
         }
         /* Mark the free vectors */
         for (vector = 0; vector < NR_VECTORS; ++vector) {
-                irq = per_cpu(vector_irq, cpu)[vector];
-                if (irq <= VECTOR_UNUSED)
+                desc = per_cpu(vector_irq, cpu)[vector];
+                if (IS_ERR_OR_NULL(desc))
                         continue;
-                data = apic_chip_data(irq_get_irq_data(irq));
+                data = apic_chip_data(irq_desc_get_irq_data(desc));
                 if (!cpumask_test_cpu(cpu, data->domain))
                         per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
         }
@@ -447,7 +449,7 @@ void setup_vector_irq(int cpu)
          * legacy vector to irq mapping:
          */
         for (irq = 0; irq < nr_legacy_irqs(); irq++)
-                per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
+                per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
         __setup_vector_irq(cpu);
 }
@@ -543,19 +545,13 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
         me = smp_processor_id();
 
         for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-                int irq;
-                unsigned int irr;
-                struct irq_desc *desc;
                 struct apic_chip_data *data;
+                struct irq_desc *desc;
+                unsigned int irr;
 
         retry:
-                irq = __this_cpu_read(vector_irq[vector]);
-
-                if (irq <= VECTOR_UNUSED)
-                        continue;
-
-                desc = irq_to_desc(irq);
-                if (!desc)
+                desc = __this_cpu_read(vector_irq[vector]);
+                if (IS_ERR_OR_NULL(desc))
                         continue;
 
                 if (!raw_spin_trylock(&desc->lock)) {
@@ -565,9 +561,10 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                         goto retry;
                 }
 
-                data = apic_chip_data(&desc->irq_data);
+                data = apic_chip_data(irq_desc_get_irq_data(desc));
                 if (!data)
                         goto unlock;
+
                 /*
                  * Check if the irq migration is in progress. If so, we
                  * haven't received the cleanup request yet for this irq.

@@ -211,22 +211,21 @@ u64 arch_irq_stat(void)
 __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
+        struct irq_desc * desc;
         /* high bit used in ret_from_ code */
         unsigned vector = ~regs->orig_ax;
-        unsigned irq;
 
         entering_irq();
 
-        irq = __this_cpu_read(vector_irq[vector]);
-
-        if (!handle_irq(irq, regs)) {
+        desc = __this_cpu_read(vector_irq[vector]);
+        if (!handle_irq(desc, regs)) {
                 ack_APIC_irq();
 
-                if (irq != VECTOR_RETRIGGERED) {
-                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
+                if (desc != VECTOR_RETRIGGERED) {
+                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
                                              __func__, smp_processor_id(),
-                                             vector, irq);
+                                             vector);
                 } else {
                         __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                 }
@@ -330,10 +329,10 @@ static struct cpumask affinity_new, online_new;
  */
 int check_irq_vectors_for_cpu_disable(void)
 {
-        int irq, cpu;
         unsigned int this_cpu, vector, this_count, count;
         struct irq_desc *desc;
         struct irq_data *data;
+        int cpu;
 
         this_cpu = smp_processor_id();
         cpumask_copy(&online_new, cpu_online_mask);
@@ -341,24 +340,21 @@ int check_irq_vectors_for_cpu_disable(void)
         this_count = 0;
         for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-                irq = __this_cpu_read(vector_irq[vector]);
-                if (irq < 0)
-                        continue;
-                desc = irq_to_desc(irq);
-                if (!desc)
+                desc = __this_cpu_read(vector_irq[vector]);
+                if (IS_ERR_OR_NULL(desc))
                         continue;
-
                 /*
                  * Protect against concurrent action removal, affinity
                  * changes etc.
                  */
                 raw_spin_lock(&desc->lock);
                 data = irq_desc_get_irq_data(desc);
-                cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
+                cpumask_copy(&affinity_new,
+                             irq_data_get_affinity_mask(data));
                 cpumask_clear_cpu(this_cpu, &affinity_new);
 
                 /* Do not count inactive or per-cpu irqs. */
-                if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+                if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
                         raw_spin_unlock(&desc->lock);
                         continue;
                 }
@@ -399,8 +395,8 @@ int check_irq_vectors_for_cpu_disable(void)
                 for (vector = FIRST_EXTERNAL_VECTOR;
                      vector < first_system_vector; vector++) {
                         if (!test_bit(vector, used_vectors) &&
-                            per_cpu(vector_irq, cpu)[vector] <= VECTOR_UNUSED)
+                            IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
                                 count++;
                 }
         }
@@ -504,14 +500,13 @@ void fixup_irqs(void)
         for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                 unsigned int irr;
 
-                if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNUSED)
+                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                         continue;
 
                 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                 if (irr & (1 << (vector % 32))) {
-                        irq = __this_cpu_read(vector_irq[vector]);
-                        desc = irq_to_desc(irq);
+                        desc = __this_cpu_read(vector_irq[vector]);
                         raw_spin_lock(&desc->lock);
                         data = irq_desc_get_irq_data(desc);
                         chip = irq_data_get_irq_chip(data);

@@ -148,21 +148,20 @@ void do_softirq_own_stack(void)
         call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-        struct irq_desc *desc;
+        unsigned int irq = irq_desc_get_irq(desc);
         int overflow;
 
         overflow = check_stack_overflow();
 
-        desc = irq_to_desc(irq);
-        if (unlikely(!desc))
+        if (IS_ERR_OR_NULL(desc))
                 return false;
 
         if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
                 if (unlikely(overflow))
                         print_stack_overflow();
-                desc->handle_irq(irq, desc);
+                generic_handle_irq_desc(irq, desc);
         }
 
         return true;

@@ -68,16 +68,13 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #endif
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-        struct irq_desc *desc;
-
         stack_overflow_check(regs);
 
-        desc = irq_to_desc(irq);
-        if (unlikely(!desc))
+        if (unlikely(IS_ERR_OR_NULL(desc)))
                 return false;
 
-        generic_handle_irq_desc(irq, desc);
+        generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
         return true;
 }
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
         int cpu;
 
         for_each_online_cpu(cpu) {
-                if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNUSED)
+                if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
                         return 1;
         }
@@ -94,7 +94,7 @@ void __init init_IRQ(void)
          * irq's migrate etc.
          */
         for (i = 0; i < nr_legacy_irqs(); i++)
-                per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i;
+                per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
         x86_init.irqs.intr_init();
 }

@@ -843,6 +843,7 @@ static struct irq_chip lguest_irq_controller = {
  */
 static int lguest_setup_irq(unsigned int irq)
 {
+        struct irq_desc *desc;
         int err;
 
         /* Returns -ve error or vector number. */
@@ -858,7 +859,8 @@ static int lguest_setup_irq(unsigned int irq)
                                       handle_level_irq, "level");
 
         /* Some systems map "vectors" to interrupts weirdly. Not us! */
-        __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], irq);
+        desc = irq_to_desc(irq);
+        __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
 
         return 0;
 }