Commit f4b7b53c authored by Lai Jiangshan, committed by Tejun Heo

workqueue: Detach workers directly in idle_cull_fn()

The code that kicks off the destruction of workers now runs in process
context (idle_cull_fn()), and detaching a worker no longer has to happen
inside the worker thread itself, so do the detaching directly in
idle_cull_fn().

wake_dying_workers() is renamed to detach_dying_workers(), and its
now-unneeded wakeup of the dying workers is removed.

Cc: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent f45b1c3c
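
As a rough illustration of the new shape of the culling path (not the kernel code itself), here is a minimal userspace sketch in plain C with pthreads: a culler detaches dying workers from the pool's list while holding the attach mutex, then reaps them outside of it, the same ordering idle_cull_fn() now uses with detach_dying_workers() and reap_dying_workers(). All names (pool, worker_fn, idle_cull) and the pthread plumbing are illustrative assumptions, not kernel APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
        pthread_t thread;
        struct worker *next;            /* link in pool.workers */
        int id;
};

static struct {
        pthread_mutex_t attach_mutex;   /* stands in for wq_pool_attach_mutex */
        struct worker *workers;         /* attached workers */
} pool = { .attach_mutex = PTHREAD_MUTEX_INITIALIZER };

/* Worker body: with this patch, the worker no longer detaches itself. */
static void *worker_fn(void *arg)
{
        struct worker *w = arg;
        printf("worker %d exiting; the culler detaches and reaps it\n", w->id);
        return NULL;
}

/* Culler in process context: detach under the mutex, reap outside it. */
static void idle_cull(void)
{
        pthread_mutex_lock(&pool.attach_mutex);
        struct worker *cull = pool.workers;     /* detach_dying_workers() analogue */
        pool.workers = NULL;
        pthread_mutex_unlock(&pool.attach_mutex);

        while (cull) {                          /* reap_dying_workers() analogue */
                struct worker *next = cull->next;
                pthread_join(cull->thread, NULL);
                free(cull);
                cull = next;
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct worker *w = malloc(sizeof(*w));
                w->id = i;
                pthread_mutex_lock(&pool.attach_mutex);
                w->next = pool.workers;
                pool.workers = w;
                pthread_mutex_unlock(&pool.attach_mutex);
                pthread_create(&w->thread, NULL, worker_fn, w);
        }
        idle_cull();
        return 0;
}

Build with cc -pthread. The design point mirrors the patch: detachment happens in ordinary process context under the attach mutex, so a dying worker never has to detach itself from its pool.
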
@@ -2695,6 +2695,16 @@ static void unbind_worker(struct worker *worker)
 	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
 
+static void detach_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	unbind_worker(worker);
+	list_del(&worker->node);
+	worker->pool = NULL;
+}
+
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
@@ -2711,11 +2721,7 @@ static void worker_detach_from_pool(struct worker *worker)
 	WARN_ON_ONCE(pool->flags & POOL_BH);
 
 	mutex_lock(&wq_pool_attach_mutex);
-	unbind_worker(worker);
-	list_del(&worker->node);
-	worker->pool = NULL;
+	detach_worker(worker);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
@@ -2807,24 +2813,12 @@ static struct worker *create_worker(struct worker_pool *pool)
 	return NULL;
 }
 
-static void wake_dying_workers(struct list_head *cull_list)
+static void detach_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker;
 
-	list_for_each_entry(worker, cull_list, entry) {
-		unbind_worker(worker);
-		/*
-		 * If the worker was somehow already running, then it had to be
-		 * in pool->idle_list when set_worker_dying() happened or we
-		 * wouldn't have gotten here.
-		 *
-		 * Thus, the worker must either have observed the WORKER_DIE
-		 * flag, or have set its state to TASK_IDLE. Either way, the
-		 * below will be observed by the worker and is safe to do
-		 * outside of pool->lock.
-		 */
-		wake_up_process(worker->task);
-	}
+	list_for_each_entry(worker, cull_list, entry)
+		detach_worker(worker);
 }
 
 static void reap_dying_workers(struct list_head *cull_list)
@@ -2930,9 +2924,9 @@ static void idle_cull_fn(struct work_struct *work)
 	/*
 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
-	 * cannot proceed beyong worker_detach_from_pool() in its self-destruct
-	 * path. This is required as a previously-preempted worker could run after
-	 * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyong set_pf_worker() in its self-destruct path.
+	 * This is required as a previously-preempted worker could run after
+	 * set_worker_dying() has happened but before detach_dying_workers() did.
 	 */
 	mutex_lock(&wq_pool_attach_mutex);
 	raw_spin_lock_irq(&pool->lock);
@@ -2953,7 +2947,7 @@ static void idle_cull_fn(struct work_struct *work)
 	}
 
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	reap_dying_workers(&cull_list);
@@ -3336,7 +3330,6 @@ static int worker_thread(void *__worker)
 		set_task_comm(worker->task, "kworker/dying");
 		ida_free(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
@@ -4921,7 +4914,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	raw_spin_unlock_irq(&pool->lock);
 
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);