Commit 0145c30e authored by Thomas Gleixner's avatar Thomas Gleixner

genirq/affinity: Code consolidation

All information and calculations in the interrupt affinity spreading code
are strictly unsigned int. However, the code uses int all over the place.

Convert it over to unsigned int.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Bjorn Helgaas <helgaas@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: linux-nvme@lists.infradead.org
Cc: linux-pci@vger.kernel.org
Cc: Keith Busch <keith.busch@intel.com>
Cc: Sumit Saxena <sumit.saxena@broadcom.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Shivasharan Srikanteshwara <shivasharan.srikanteshwara@broadcom.com>
Link: https://lkml.kernel.org/r/20190216172228.336424556@linutronix.de
parent d869f866
...@@ -251,10 +251,10 @@ struct irq_affinity_notify { ...@@ -251,10 +251,10 @@ struct irq_affinity_notify {
* @sets: Number of affinitized sets * @sets: Number of affinitized sets
*/ */
struct irq_affinity { struct irq_affinity {
int pre_vectors; unsigned int pre_vectors;
int post_vectors; unsigned int post_vectors;
int nr_sets; unsigned int nr_sets;
int *sets; unsigned int *sets;
}; };
/** /**
...@@ -314,9 +314,10 @@ extern int ...@@ -314,9 +314,10 @@ extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
struct irq_affinity_desc * struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); irq_create_affinity_masks(unsigned int nvec, const struct irq_affinity *affd);
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
const struct irq_affinity *affd);
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
...@@ -350,13 +351,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) ...@@ -350,13 +351,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
} }
static inline struct irq_affinity_desc * static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) irq_create_affinity_masks(unsigned int nvec, const struct irq_affinity *affd)
{ {
return NULL; return NULL;
} }
static inline int static inline unsigned int
irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
const struct irq_affinity *affd)
{ {
return maxvec; return maxvec;
} }
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
int cpus_per_vec) unsigned int cpus_per_vec)
{ {
const struct cpumask *siblmsk; const struct cpumask *siblmsk;
int cpu, sibl; int cpu, sibl;
...@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask, ...@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
} }
static int __irq_build_affinity_masks(const struct irq_affinity *affd, static int __irq_build_affinity_masks(const struct irq_affinity *affd,
int startvec, int numvecs, int firstvec, unsigned int startvec,
unsigned int numvecs,
unsigned int firstvec,
cpumask_var_t *node_to_cpumask, cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask, const struct cpumask *cpu_mask,
struct cpumask *nmsk, struct cpumask *nmsk,
struct irq_affinity_desc *masks) struct irq_affinity_desc *masks)
{ {
int n, nodes, cpus_per_vec, extra_vecs, done = 0; unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
int last_affv = firstvec + numvecs; unsigned int last_affv = firstvec + numvecs;
int curvec = startvec; unsigned int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE; nodemask_t nodemsk = NODE_MASK_NONE;
if (!cpumask_weight(cpu_mask)) if (!cpumask_weight(cpu_mask))
...@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd, ...@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
*/ */
if (numvecs <= nodes) { if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) { for_each_node_mask(n, nodemsk) {
cpumask_or(&masks[curvec].mask, cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
&masks[curvec].mask, node_to_cpumask[n]);
node_to_cpumask[n]);
if (++curvec == last_affv) if (++curvec == last_affv)
curvec = firstvec; curvec = firstvec;
} }
done = numvecs; return numvecs;
goto out;
} }
for_each_node_mask(n, nodemsk) { for_each_node_mask(n, nodemsk) {
int ncpus, v, vecs_to_assign, vecs_per_node; unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
/* Spread the vectors per node */ /* Spread the vectors per node */
vecs_per_node = (numvecs - (curvec - firstvec)) / nodes; vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
...@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd, ...@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
curvec = firstvec; curvec = firstvec;
--nodes; --nodes;
} }
out:
return done; return done;
} }
...@@ -174,13 +172,14 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd, ...@@ -174,13 +172,14 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
* 2) spread other possible CPUs on these vectors * 2) spread other possible CPUs on these vectors
*/ */
static int irq_build_affinity_masks(const struct irq_affinity *affd, static int irq_build_affinity_masks(const struct irq_affinity *affd,
int startvec, int numvecs, int firstvec, unsigned int startvec, unsigned int numvecs,
unsigned int firstvec,
struct irq_affinity_desc *masks) struct irq_affinity_desc *masks)
{ {
int curvec = startvec, nr_present, nr_others; unsigned int curvec = startvec, nr_present, nr_others;
int ret = -ENOMEM;
cpumask_var_t nmsk, npresmsk;
cpumask_var_t *node_to_cpumask; cpumask_var_t *node_to_cpumask;
cpumask_var_t nmsk, npresmsk;
int ret = -ENOMEM;
if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return ret; return ret;
...@@ -239,12 +238,10 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd, ...@@ -239,12 +238,10 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
* Returns the irq_affinity_desc pointer or NULL if allocation failed. * Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/ */
struct irq_affinity_desc * struct irq_affinity_desc *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) irq_create_affinity_masks(unsigned int nvecs, const struct irq_affinity *affd)
{ {
int affvecs = nvecs - affd->pre_vectors - affd->post_vectors; unsigned int affvecs, curvec, usedvecs, nr_sets, i;
int curvec, usedvecs;
struct irq_affinity_desc *masks = NULL; struct irq_affinity_desc *masks = NULL;
int i, nr_sets;
/* /*
* If there aren't any vectors left after applying the pre/post * If there aren't any vectors left after applying the pre/post
...@@ -264,16 +261,17 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -264,16 +261,17 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
* Spread on present CPUs starting from affd->pre_vectors. If we * Spread on present CPUs starting from affd->pre_vectors. If we
* have multiple sets, build each sets affinity mask separately. * have multiple sets, build each sets affinity mask separately.
*/ */
affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
nr_sets = affd->nr_sets; nr_sets = affd->nr_sets;
if (!nr_sets) if (!nr_sets)
nr_sets = 1; nr_sets = 1;
for (i = 0, usedvecs = 0; i < nr_sets; i++) { for (i = 0, usedvecs = 0; i < nr_sets; i++) {
int this_vecs = affd->sets ? affd->sets[i] : affvecs; unsigned int this_vecs = affd->sets ? affd->sets[i] : affvecs;
int ret; int ret;
ret = irq_build_affinity_masks(affd, curvec, this_vecs, ret = irq_build_affinity_masks(affd, curvec, this_vecs,
curvec, masks); curvec, masks);
if (ret) { if (ret) {
kfree(masks); kfree(masks);
return NULL; return NULL;
...@@ -303,17 +301,17 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -303,17 +301,17 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
* @maxvec: The maximum number of vectors available * @maxvec: The maximum number of vectors available
* @affd: Description of the affinity requirements * @affd: Description of the affinity requirements
*/ */
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
const struct irq_affinity *affd)
{ {
int resv = affd->pre_vectors + affd->post_vectors; unsigned int resv = affd->pre_vectors + affd->post_vectors;
int vecs = maxvec - resv; unsigned int set_vecs;
int set_vecs;
if (resv > minvec) if (resv > minvec)
return 0; return 0;
if (affd->nr_sets) { if (affd->nr_sets) {
int i; unsigned int i;
for (i = 0, set_vecs = 0; i < affd->nr_sets; i++) for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
set_vecs += affd->sets[i]; set_vecs += affd->sets[i];
...@@ -323,5 +321,5 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity ...@@ -323,5 +321,5 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
put_online_cpus(); put_online_cpus();
} }
return resv + min(set_vecs, vecs); return resv + min(set_vecs, maxvec - resv);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment