Commit 68d54d3f authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq affinity fixes from Thomas Gleixner:

  - Fix error path handling in the affinity spreading code

  - Make affinity spreading smarter to avoid issues on systems which
    claim to have hotpluggable CPUs while in fact they can't hotplug
    anything.

    So instead of trying to spread the vectors (and thereby the
    associated device queues) across all possible CPUs, spread them
    across all present CPUs first. If there are leftover vectors after
    that first step, they are spread among the possible, but not
    present, CPUs. This keeps the code backwards compatible for
    virtual devices and NVME, which allocate a queue per possible CPU,
    but makes the spreading smarter for devices which have fewer
    queues than there are possible or present CPUs (a simplified
    sketch of this two-pass ordering follows below).
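
A minimal userspace sketch of that two-pass ordering (this is not the
kernel implementation, which spreads per NUMA node over cpumasks; see
the diff below). The CPU set, the vector count and the plain
round-robin assignment are made-up illustration values:

	/*
	 * Illustration only: spread "vectors" over present CPUs first,
	 * then let the possible-but-not-present CPUs reuse the same
	 * vector space. Example values, not kernel code.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define NR_CPUS	8
	#define NR_VECS	4

	int main(void)
	{
		/* 8 possible CPUs, only the first 4 are present */
		bool possible[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
		bool present[NR_CPUS]  = { 1, 1, 1, 1, 0, 0, 0, 0 };
		bool vec_mask[NR_VECS][NR_CPUS] = { { false } };
		int curvec = 0, cpu, v;

		/* Pass 1: spread the present CPUs across the vectors */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!present[cpu])
				continue;
			vec_mask[curvec][cpu] = true;
			curvec = (curvec + 1) % NR_VECS;
		}

		/*
		 * Pass 2: possible-but-not-present CPUs wrap onto the
		 * already spread vectors.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (present[cpu] || !possible[cpu])
				continue;
			vec_mask[curvec][cpu] = true;
			curvec = (curvec + 1) % NR_VECS;
		}

		for (v = 0; v < NR_VECS; v++) {
			printf("vector %d:", v);
			for (cpu = 0; cpu < NR_CPUS; cpu++)
				if (vec_mask[v][cpu])
					printf(" cpu%d", cpu);
			printf("\n");
		}
		return 0;
	}

With these example values, vectors 0-3 each get one present CPU in the
first pass, and the not-yet-present CPUs 4-7 wrap around onto the same
four vectors in the second pass.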

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/affinity: Spread irq vectors among present CPUs as far as possible
  genirq/affinity: Allow irq spreading from a given starting point
  genirq/affinity: Move actual irq vector spreading into a helper function
  genirq/affinity: Rename *node_to_possible_cpumask as *node_to_cpumask
  genirq/affinity: Don't return with empty affinity masks on error
parents 9dceab89 d3056812
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	}
 }
 
-static cpumask_var_t *alloc_node_to_possible_cpumask(void)
+static cpumask_var_t *alloc_node_to_cpumask(void)
 {
 	cpumask_var_t *masks;
 	int node;
@@ -62,7 +62,7 @@ static cpumask_var_t *alloc_node_to_possible_cpumask(void)
 	return NULL;
 }
 
-static void free_node_to_possible_cpumask(cpumask_var_t *masks)
+static void free_node_to_cpumask(cpumask_var_t *masks)
 {
 	int node;
@@ -71,7 +71,7 @@ static void free_node_to_possible_cpumask(cpumask_var_t *masks)
 	kfree(masks);
 }
 
-static void build_node_to_possible_cpumask(cpumask_var_t *masks)
+static void build_node_to_cpumask(cpumask_var_t *masks)
 {
 	int cpu;
@@ -79,14 +79,14 @@ static void build_node_to_possible_cpumask(cpumask_var_t *masks)
 		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
 }
 
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 				const struct cpumask *mask, nodemask_t *nodemsk)
 {
 	int n, nodes = 0;
 
 	/* Calculate the number of nodes in the supplied affinity mask */
 	for_each_node(n) {
-		if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
+		if (cpumask_intersects(mask, node_to_cpumask[n])) {
 			node_set(n, *nodemsk);
 			nodes++;
 		}
@@ -94,73 +94,46 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
 	return nodes;
 }
 
-/**
- * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @nvecs: The total number of vectors
- * @affd: Description of the affinity requirements
- *
- * Returns the masks pointer or NULL if allocation failed.
- */
-struct cpumask *
-irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+static int irq_build_affinity_masks(const struct irq_affinity *affd,
+				    int startvec, int numvecs,
+				    cpumask_var_t *node_to_cpumask,
+				    const struct cpumask *cpu_mask,
+				    struct cpumask *nmsk,
+				    struct cpumask *masks)
 {
-	int n, nodes, cpus_per_vec, extra_vecs, curvec;
-	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
-	int last_affv = affv + affd->pre_vectors;
+	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+	int last_affv = affd->pre_vectors + numvecs;
+	int curvec = startvec;
 	nodemask_t nodemsk = NODE_MASK_NONE;
-	struct cpumask *masks;
-	cpumask_var_t nmsk, *node_to_possible_cpumask;
-
-	/*
-	 * If there aren't any vectors left after applying the pre/post
-	 * vectors don't bother with assigning affinity.
-	 */
-	if (!affv)
-		return NULL;
-
-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return NULL;
 
-	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
-	if (!masks)
-		goto out;
+	if (!cpumask_weight(cpu_mask))
+		return 0;
 
-	node_to_possible_cpumask = alloc_node_to_possible_cpumask();
-	if (!node_to_possible_cpumask)
-		goto out;
-
-	/* Fill out vectors at the beginning that don't need affinity */
-	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
-		cpumask_copy(masks + curvec, irq_default_affinity);
-
-	/* Stabilize the cpumasks */
-	get_online_cpus();
-	build_node_to_possible_cpumask(node_to_possible_cpumask);
-	nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
-				     &nodemsk);
+	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
 
 	/*
 	 * If the number of nodes in the mask is greater than or equal the
 	 * number of vectors we just spread the vectors across the nodes.
 	 */
-	if (affv <= nodes) {
+	if (numvecs <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_copy(masks + curvec,
-				     node_to_possible_cpumask[n]);
-			if (++curvec == last_affv)
+			cpumask_copy(masks + curvec, node_to_cpumask[n]);
+			if (++done == numvecs)
 				break;
+			if (++curvec == last_affv)
+				curvec = affd->pre_vectors;
 		}
-		goto done;
+		goto out;
 	}
 
 	for_each_node_mask(n, nodemsk) {
 		int ncpus, v, vecs_to_assign, vecs_per_node;
 
 		/* Spread the vectors per node */
-		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
+		vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
+		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
@@ -181,19 +154,96 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= last_affv)
+		done += v;
+		if (done >= numvecs)
 			break;
+		if (curvec >= last_affv)
+			curvec = affd->pre_vectors;
 		--nodes;
 	}
-done:
+
+out:
+	return done;
+}
+
+/**
+ * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
+ * @nvecs: The total number of vectors
+ * @affd: Description of the affinity requirements
+ *
+ * Returns the masks pointer or NULL if allocation failed.
+ */
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+{
+	int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+	int curvec, usedvecs;
+	cpumask_var_t nmsk, npresmsk, *node_to_cpumask;
+	struct cpumask *masks = NULL;
+
+	/*
+	 * If there aren't any vectors left after applying the pre/post
+	 * vectors don't bother with assigning affinity.
+	 */
+	if (nvecs == affd->pre_vectors + affd->post_vectors)
+		return NULL;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return NULL;
+
+	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+		goto outcpumsk;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto outnpresmsk;
+
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto outnodemsk;
+
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, irq_default_affinity);
+
+	/* Stabilize the cpumasks */
+	get_online_cpus();
+	build_node_to_cpumask(node_to_cpumask);
+
+	/* Spread on present CPUs starting from affd->pre_vectors */
+	usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
+					    node_to_cpumask, cpu_present_mask,
+					    nmsk, masks);
+
+	/*
+	 * Spread on non present CPUs starting from the next vector to be
+	 * handled. If the spreading of present CPUs already exhausted the
+	 * vector space, assign the non present CPUs to the already spread
+	 * out vectors.
+	 */
+	if (usedvecs >= affvecs)
+		curvec = affd->pre_vectors;
+	else
+		curvec = affd->pre_vectors + usedvecs;
+	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+	usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
+					     node_to_cpumask, npresmsk,
+					     nmsk, masks);
 	put_online_cpus();
 
 	/* Fill out vectors at the end that don't need affinity */
+	if (usedvecs >= affvecs)
+		curvec = affd->pre_vectors + affvecs;
+	else
+		curvec = affd->pre_vectors + usedvecs;
 	for (; curvec < nvecs; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
 
-	free_node_to_possible_cpumask(node_to_possible_cpumask);
-out:
+outnodemsk:
+	free_node_to_cpumask(node_to_cpumask);
+outnpresmsk:
+	free_cpumask_var(npresmsk);
+outcpumsk:
 	free_cpumask_var(nmsk);
 	return masks;
 }