Commit 111c225a authored by Tejun Heo

workqueue: set PF_WQ_WORKER on rescuers

PF_WQ_WORKER is used to tell the scheduler that the task is a workqueue
worker and needs wq_worker_sleeping/waking_up() invoked on it for
concurrency management.  As rescuers never participate in concurrency
management, PF_WQ_WORKER wasn't set on them.
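
(For context, not part of this patch: the scheduler-side hook looks
roughly like the following paraphrase of the __schedule() path in
kernel/sched/core.c of this era.  When a PF_WQ_WORKER task blocks, the
scheduler asks the workqueue code whether another worker should be
woken up to take its place.)

	/* sketch: scheduler notifying workqueue code of a sleeping worker */
	if (prev->flags & PF_WQ_WORKER) {
		struct task_struct *to_wakeup;

		to_wakeup = wq_worker_sleeping(prev, cpu);
		if (to_wakeup)
			try_to_wake_up_local(to_wakeup);
	}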

There's a need for an interface which can query whether %current is
executing a work item and, if so, which one.  Such an interface
requires a way to identify all tasks which may execute work items, and
PF_WQ_WORKER will be used for that.  As all normal workers always have
PF_WQ_WORKER set, we only need to add it to rescuers.
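
(A minimal sketch of such a query helper, assuming PF_WQ_WORKER marks
every task that may execute work items; the helper name
current_wq_worker() is illustrative.)

	/* sketch: return the worker behind %current, or NULL */
	static struct worker *current_wq_worker(void)
	{
		if (current->flags & PF_WQ_WORKER)
			return kthread_data(current);
		return NULL;
	}

Such a helper is only safe if kthread_data() of every PF_WQ_WORKER
task points to its struct worker, which is what this patch arranges
for rescuers.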

As rescuers start with WORKER_PREP but never clear it, they are always
NOT_RUNNING, and there's no need to worry about them interfering with
concurrency management even if PF_WQ_WORKER is set; however, unlike
normal workers, rescuers currently don't have their worker struct as
kthread_data().  They use the associated workqueue_struct instead.
This is problematic as wq_worker_sleeping/waking_up() expect a struct
worker at kthread_data().
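
(For reference, WORKER_NOT_RUNNING is a mask over the worker flags,
approximately as in the kernel/workqueue.c enum of this era; the exact
flag values are illustrative.  Because WORKER_PREP is part of the mask
and rescuers never clear it, wq_worker_sleeping() always takes the
early NOT_RUNNING return for them.)

	/* sketch: flags that exclude a worker from concurrency management */
	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_UNBOUND |
				  WORKER_REBIND | WORKER_CPU_INTENSIVE,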

This patch adds worker->rescue_wq, starts rescuer kthreads with the
worker struct as kthread_data(), and sets PF_WQ_WORKER on rescuers.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
parent 023f27d3
@@ -149,6 +149,9 @@ struct worker {
 
 	/* for rebinding worker to CPU */
 	struct work_struct	rebind_work;	/* L: for busy worker */
+
+	/* used only by rescuers to point to the target workqueue */
+	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
 };
 
 struct worker_pool {
@@ -763,12 +766,20 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 				       unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-	struct worker_pool *pool = worker->pool;
-	atomic_t *nr_running = get_pool_nr_running(pool);
+	struct worker_pool *pool;
+	atomic_t *nr_running;
 
+	/*
+	 * Rescuers, which may not have all the fields set up like normal
+	 * workers, also reach here, let's not access anything before
+	 * checking NOT_RUNNING.
+	 */
 	if (worker->flags & WORKER_NOT_RUNNING)
 		return NULL;
 
+	pool = worker->pool;
+	nr_running = get_pool_nr_running(pool);
+
 	/* this can only happen on the local cpu */
 	BUG_ON(cpu != raw_smp_processor_id());
 
@@ -2357,7 +2368,7 @@ static int worker_thread(void *__worker)
 
 /**
  * rescuer_thread - the rescuer thread function
- * @__wq: the associated workqueue
+ * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
@@ -2374,20 +2385,27 @@ static int worker_thread(void *__worker)
  *
  * This should happen rarely.
  */
-static int rescuer_thread(void *__wq)
+static int rescuer_thread(void *__rescuer)
 {
-	struct workqueue_struct *wq = __wq;
-	struct worker *rescuer = wq->rescuer;
+	struct worker *rescuer = __rescuer;
+	struct workqueue_struct *wq = rescuer->rescue_wq;
 	struct list_head *scheduled = &rescuer->scheduled;
 	bool is_unbound = wq->flags & WQ_UNBOUND;
 	unsigned int cpu;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
+
+	/*
+	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
+	 * doesn't participate in concurrency management.
+	 */
+	rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
+		rescuer->task->flags &= ~PF_WQ_WORKER;
 		return 0;
 	}
 
@@ -2431,6 +2449,8 @@ static int rescuer_thread(void *__wq)
 		spin_unlock_irq(&gcwq->lock);
 	}
 
+	/* rescuers should never participate in concurrency management */
+	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
 	schedule();
 	goto repeat;
 }
@@ -3266,7 +3286,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		if (!rescuer)
 			goto err;
 
-		rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+		rescuer->rescue_wq = wq;
+		rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
 					       wq->name);
 		if (IS_ERR(rescuer->task))
 			goto err;