Commit cbbfb0ae authored by Linus Torvalds

Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Ingo Molnar:
 "Improve the spreading of managed IRQs at allocation time"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irq/matrix: Spread managed interrupts on allocation
  irq/matrix: Split out the CPU selection code into a helper
parents 42f52e1c 76f99ae5
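Background for the change below: previously assign_managed_vector() always took the first CPU in the search mask, so managed interrupts piled up on the lowest-numbered CPUs. With this series, irq_matrix_alloc_managed() receives the whole mask and picks the online CPU with the most available vectors via the new matrix_find_best_cpu() helper. A minimal userspace sketch of that selection policy (simplified stand-in types and made-up numbers, not the kernel implementation):

#include <stdio.h>
#include <limits.h>

/* Simplified stand-in for the kernel's per-CPU map; only the fields the search needs. */
struct cpumap {
	int online;
	unsigned int available;		/* free vectors on this CPU */
};

/* Pick the online CPU with the most available vectors, UINT_MAX if none. */
static unsigned int find_best_cpu(const struct cpumap *maps, unsigned int ncpus)
{
	unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!maps[cpu].online || maps[cpu].available <= maxavl)
			continue;
		best_cpu = cpu;
		maxavl = maps[cpu].available;
	}
	return best_cpu;
}

int main(void)
{
	/* CPU1 has the most free vectors, so it should be chosen. */
	struct cpumap maps[] = {
		{ .online = 1, .available = 10 },
		{ .online = 1, .available = 42 },
		{ .online = 0, .available = 99 },	/* offline: skipped */
	};

	printf("best cpu: %u\n", find_best_cpu(maps, 3));
	return 0;
}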
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+					  &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -239,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
+
+	if (cpumask_empty(msk))
+		return -EINVAL;
+
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get managed bit which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -252,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }
@@ -322,37 +354,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
-
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
-
-		if (!cm->online || cm->available <= maxavl)
-			continue;
-
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
 
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
+
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 }
 
 /**
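Taken together, the effect on allocation is that each new managed interrupt lands on the CPU that currently has the most free vectors, which evens the load out instead of concentrating it on the first CPU in the mask. A small standalone simulation of that behaviour (toy data, fixed CPU count; purely illustrative, not kernel code):

#include <stdio.h>
#include <limits.h>

#define NCPUS 4

/* Free vectors per CPU; all CPUs considered online in this toy model. */
static unsigned int available[NCPUS] = { 200, 200, 200, 200 };

/* Same policy as the kernel helper: CPU with the most free vectors wins. */
static unsigned int find_best_cpu(void)
{
	unsigned int cpu, best = UINT_MAX, maxavl = 0;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (available[cpu] <= maxavl)
			continue;
		best = cpu;
		maxavl = available[cpu];
	}
	return best;
}

int main(void)
{
	unsigned int count[NCPUS] = { 0 };

	/* Allocate 16 managed interrupts; each one goes to the CPU that is
	 * currently least loaded, so the distribution ends up even. */
	for (int i = 0; i < 16; i++) {
		unsigned int cpu = find_best_cpu();

		if (cpu == UINT_MAX)
			break;		/* no capacity left */
		available[cpu]--;
		count[cpu]++;
	}

	for (unsigned int cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%u: %u interrupts\n", cpu, count[cpu]);
	return 0;
}

With four equally idle CPUs and 16 allocations, this loop spreads them 4/4/4/4, whereas the old "always take the first CPU" behaviour would have put all 16 on cpu0.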