Commit e5efc231 authored by Erwan Velu, committed by Jakub Kicinski

net/mlx5: Use cpumask_local_spread() instead of custom code

Commit 2acda577 ("net/mlx5e: Improve remote NUMA preferences used for the IRQ affinity hints")
removed the usage of cpumask_local_spread().

The issue explained in that commit was later fixed by
commit 406d394a ("cpumask: improve on cpumask_local_spread() locality").

Since that commit, mlx5_cpumask_default_spread() has had the same
behavior as cpumask_local_spread().
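
For illustration only, a minimal sketch of that equivalence (helper name
hypothetical, not part of this patch):

#include <linux/cpumask.h>

/* i-th preferred CPU for @node: CPUs of @node come first, then the
 * rest in increasing NUMA distance -- the same order the open-coded
 * for_each_numa_hop_mask() walk in the removed helper produced.
 */
static unsigned int nth_nearest_cpu(unsigned int i, int node)
{
	return cpumask_local_spread(i, node);
}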

This commit:
- removes the custom logic and uses cpumask_local_spread() instead
- passes mlx5_core_dev as an argument for more flexibility

mlx5_cpumask_default_spread() is kept, as it could be useful for
future quirks.
Signed-off-by: Erwan Velu <e.velu@criteo.com>
Acked-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20240812082244.22810-1-e.velu@criteo.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8ea71e23
@@ -835,28 +835,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
 	mlx5_irq_release_vector(irq);
 }
 
-static int mlx5_cpumask_default_spread(int numa_node, int index)
+static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
 {
-	const struct cpumask *prev = cpu_none_mask;
-	const struct cpumask *mask;
-	int found_cpu = 0;
-	int i = 0;
-	int cpu;
-
-	rcu_read_lock();
-	for_each_numa_hop_mask(mask, numa_node) {
-		for_each_cpu_andnot(cpu, mask, prev) {
-			if (i++ == index) {
-				found_cpu = cpu;
-				goto spread_done;
-			}
-		}
-		prev = mask;
-	}
-
-spread_done:
-	rcu_read_unlock();
-	return found_cpu;
+	return cpumask_local_spread(index, dev->priv.numa_node);
 }
 
 static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
@@ -880,7 +861,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
 	int cpu;
 
 	rmap = mlx5_eq_table_get_pci_rmap(dev);
-	cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+	cpu = mlx5_cpumask_default_spread(dev, vecidx);
 	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
 	if (IS_ERR(irq))
 		return PTR_ERR(irq);
@@ -1145,7 +1126,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
 	if (mask)
 		cpu = cpumask_first(mask);
 	else
-		cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+		cpu = mlx5_cpumask_default_spread(dev, vector);
 
 	return cpu;
 }
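
For a concrete sense of the resulting spread, a hedged sketch (assumed
topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1; helper name
hypothetical):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Log the CPU chosen for each completion vector on @node.  On the
 * assumed topology with node 0 this prints CPUs 0,1,2,3 then 4,5,6,7:
 * local CPUs are exhausted before spilling to the remote node.
 */
static void dump_vector_spread(int node, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++)
		pr_info("vec %d -> cpu %u\n", i, cpumask_local_spread(i, node));
}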