Commit b2b1f933 authored by Lai Jiangshan, committed by Tejun Heo

workqueue: Rename wq_update_pod() to unbound_wq_update_pwq()

wq_update_pod() does nothing more than update the pwq of the specified
CPU. Rename it accordingly and update the comments.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent d160a58d
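Before the diff, a minimal sketch of the renamed function's shape, for orientation. This is a paraphrase, not verbatim kernel source: the pod-cpumask calculation, pwq allocation, and slot installation are summarized in comments, and only identifiers that appear in the hunks below are used.

	static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
	{
		struct pool_workqueue *old_pwq = NULL, *pwq;
		struct workqueue_attrs *target_attrs;

		/* Reuse the preallocated buffer instead of allocating attrs
		 * per wq per CPU; CPU hotplug exclusion guarantees a single
		 * updater at a time. */
		target_attrs = unbound_wq_update_pwq_attrs_buf;
		copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
		wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);

		/* Compute the cpumask of @cpu's pod, allocate a pwq matching
		 * it, and install it in @wq's pwq slot for @cpu, releasing
		 * the old pwq. On allocation failure, fall back to
		 * @wq->dfl_pwq, which is always correct but may not be
		 * optimal. */
	}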
@@ -433,7 +433,7 @@ static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *wq_update_pod_attrs_buf;
+static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
@@ -5341,13 +5341,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 }
 
 /**
- * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
+ * unbound_wq_update_pwq - update a pwq slot for CPU hot[un]plug
  * @wq: the target workqueue
- * @cpu: the CPU to update pool association for
+ * @cpu: the CPU to update the pwq slot for
  *
  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
- * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
- * @wq accordingly.
+ * %CPU_DOWN_FAILED. @cpu is in the same pod of the CPU being hot[un]plugged.
  *
  *
  * If pod affinity can't be adjusted due to memory allocation failure, it falls
@@ -5360,7 +5359,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
  * responsibility to flush the work item from CPU_DOWN_PREPARE.
  */
-static void wq_update_pod(struct workqueue_struct *wq, int cpu)
+static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
 {
 	struct pool_workqueue *old_pwq = NULL, *pwq;
 	struct workqueue_attrs *target_attrs;
@@ -5375,7 +5374,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu)
 	 * Let's use a preallocated one. The following buf is protected by
 	 * CPU hotplug exclusion.
 	 */
-	target_attrs = wq_update_pod_attrs_buf;
+	target_attrs = unbound_wq_update_pwq_attrs_buf;
 
 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
 	wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
@@ -6581,7 +6580,7 @@ int workqueue_online_cpu(unsigned int cpu)
 			int tcpu;
 
 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-				wq_update_pod(wq, tcpu);
+				unbound_wq_update_pwq(wq, tcpu);
 
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq, -1);
@@ -6616,7 +6615,7 @@ int workqueue_offline_cpu(unsigned int cpu)
 			int tcpu;
 
 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-				wq_update_pod(wq, tcpu);
+				unbound_wq_update_pwq(wq, tcpu);
 
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq, cpu);
@@ -6904,9 +6903,8 @@ static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
 	wq_affn_dfl = affn;
 
 	list_for_each_entry(wq, &workqueues, list) {
-		for_each_online_cpu(cpu) {
-			wq_update_pod(wq, cpu);
-		}
+		for_each_online_cpu(cpu)
+			unbound_wq_update_pwq(wq, cpu);
 	}
 
 	mutex_unlock(&wq_pool_mutex);
@@ -7653,8 +7651,8 @@ void __init workqueue_init_early(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	wq_update_pod_attrs_buf = alloc_workqueue_attrs();
-	BUG_ON(!wq_update_pod_attrs_buf);
+	unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
+	BUG_ON(!unbound_wq_update_pwq_attrs_buf);
 
 	/*
 	 * If nohz_full is enabled, set power efficient workqueue as unbound.
@@ -7919,12 +7917,12 @@ void __init workqueue_init_topology(void)
 
 	/*
 	 * Workqueues allocated earlier would have all CPUs sharing the default
-	 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
-	 * combinations to apply per-pod sharing.
+	 * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue
+	 * and CPU combinations to apply per-pod sharing.
 	 */
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_online_cpu(cpu)
-			wq_update_pod(wq, cpu);
+			unbound_wq_update_pwq(wq, cpu);
 		if (wq->flags & WQ_UNBOUND) {
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq, -1);
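The hotplug paths above refresh one pwq slot per CPU in the affected pod, not just the slot of the CPU going up or down. Annotated for clarity, and assuming (as in the surrounding kernel code) that pt points at the wq_pod_type for the workqueue's affinity scope, the call pattern is:

	/* pt->cpu_pod[cpu] maps @cpu to its pod index, and pt->pod_cpus[pod]
	 * is the cpumask of every CPU in that pod, so each CPU sharing the
	 * pod gets its pwq slot updated. */
	for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
		unbound_wq_update_pwq(wq, tcpu);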