Commit 6e49ba1b authored by Linus Torvalds

Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull fixes for cpumask and modules from Rusty Russell:
 "** NOW WITH TESTING! **

  Two fixes which got lost in my recent distraction.  One is a weird
  cpumask function which needed to be rewritten, the other is a module
  bug which is cc:stable"

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  cpumask_set_cpu_local_first => cpumask_local_spread, lament
  module: Call module notifier on failure after complete_formation()
parents d0af6988 f36963c9
@@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 				    adapter->cfg_num_qs);
 
 	for_all_evt_queues(adapter, eqo, i) {
+		int numa_node = dev_to_node(&adapter->pdev->dev);
 		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
 			return -ENOMEM;
-		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
-					    eqo->affinity_mask);
+		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+				eqo->affinity_mask);
 		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
 			       BE_NAPI_WEIGHT);
 		napi_hash_add(&eqo->napi);
...
@@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
 {
 	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
 	int numa_node = priv->mdev->dev->numa_node;
-	int ret = 0;
 
 	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
 		return -ENOMEM;
 
-	ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
-					  ring->affinity_mask);
-	if (ret)
-		free_cpumask_var(ring->affinity_mask);
-
-	return ret;
+	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
+			ring->affinity_mask);
+	return 0;
 }
 
 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
...
@@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->queue_index = queue_index;
 
 	if (queue_index < priv->num_tx_rings_p_up)
-		cpumask_set_cpu_local_first(queue_index,
-					    priv->mdev->dev->numa_node,
-					    &ring->affinity_mask);
+		cpumask_set_cpu(cpumask_local_spread(queue_index,
+						     priv->mdev->dev->numa_node),
+				&ring->affinity_mask);
 
 	*pring = ring;
 	return 0;
...
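The three driver hunks above (benet's be_evt_queues_create() and the two mlx4 paths) all reduce to the same pattern: cpumask_local_spread() always returns a valid online CPU, so the caller simply records it with cpumask_set_cpu() and no longer has a helper error path to unwind. A minimal sketch of that pattern follows; the names init_queue_affinity, masks and nqueues are illustrative placeholders, not part of this commit.

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Sketch only: allocate one affinity mask per queue and point each queue
 * at a CPU chosen by the numa-aware spread policy. */
static int init_queue_affinity(cpumask_var_t *masks, int nqueues, int node)
{
	int i;

	for (i = 0; i < nqueues; i++) {
		if (!zalloc_cpumask_var(&masks[i], GFP_KERNEL))
			goto err;
		/* cpumask_local_spread() cannot fail, unlike the old helper,
		 * so there is no -EAGAIN/-ENOMEM handling here. */
		cpumask_set_cpu(cpumask_local_spread(i, node), masks[i]);
	}
	return 0;

err:
	while (--i >= 0)
		free_cpumask_var(masks[i]);
	return -ENOMEM;
}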
@@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 	return 1;
 }
 
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	set_bit(0, cpumask_bits(dstp));
-
 	return 0;
 }
 
@@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
...
@@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	module_bug_cleanup(mod);
 	mutex_unlock(&module_mutex);
 
+	blocking_notifier_call_chain(&module_notify_list,
+				     MODULE_STATE_GOING, mod);
+
 	/* we can't deallocate the module until we clear memory protection */
 	unset_module_init_ro_nx(mod);
 	unset_module_core_ro_nx(mod);
...
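The module.c change closes a notifier imbalance: once complete_formation() has announced MODULE_STATE_COMING, any later failure in load_module() must also announce MODULE_STATE_GOING so that subscribers can drop whatever they set up for the module. A hedged sketch of such a subscriber follows; the example_* names are illustrative and not from the kernel tree.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Sketch of a subscriber that pairs per-module setup on COMING with
 * teardown on GOING; without this fix, a load that failed after
 * complete_formation() would never deliver the GOING event. */
static int example_module_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct module *mod = data;

	switch (action) {
	case MODULE_STATE_COMING:
		pr_debug("setting up state for %s\n", mod->name);
		break;
	case MODULE_STATE_GOING:
		pr_debug("tearing down state for %s\n", mod->name);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_module_nb = {
	.notifier_call = example_module_cb,
};

/* Registration would happen from the subscriber's own init path. */
static int __init example_init(void)
{
	return register_module_notifier(&example_module_nb);
}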
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	cpumask_var_t mask;
 	int cpu;
-	int ret = 0;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
 
+	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
 
-	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-		/* Use all online cpu's for non numa aware system */
-		cpumask_copy(mask, cpu_online_mask);
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 	} else {
-		int n;
-
-		cpumask_and(mask,
-			    cpumask_of_node(numa_node), cpu_online_mask);
-
-		n = cpumask_weight(mask);
-		if (i >= n) {
-			i -= n;
-
-			/* If index > number of local cpu's, mask out local
-			 * cpu's
-			 */
-			cpumask_andnot(mask, cpu_online_mask, mask);
-		}
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
+		}
 	}
-
-	for_each_cpu(cpu, mask) {
-		if (--i < 0)
-			goto out;
-	}
-
-	ret = -EAGAIN;
-out:
-	free_cpumask_var(mask);
-
-	if (!ret)
-		cpumask_set_cpu(cpu, dstp);
-
-	return ret;
+	BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
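For reference, the selection order the new helper produces can be made visible with a small debug loop. This is only an illustrative sketch, assuming it is called from some setup or probe path; dump_spread_order is not a kernel function.

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Sketch: print where each index would land for a given node.
 * Indices first walk the online CPUs of 'node', then the remaining
 * online CPUs, and wrap once i reaches num_online_cpus(). */
static void dump_spread_order(int node)
{
	unsigned int i;

	for (i = 0; i < num_online_cpus(); i++)
		pr_info("index %u -> cpu %u\n",
			i, cpumask_local_spread(i, node));
}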