Commit e2dca7ad authored by Tejun Heo

workqueue: make the workqueues list RCU walkable

The workqueues list is protected by wq_pool_mutex, and a workqueue and
its subordinate data structures are freed directly on destruction.  We
want to add the ability to dump workqueues from a sysrq callback, which
requires walking all workqueues without grabbing wq_pool_mutex.  This
patch makes freeing of workqueues RCU-protected and makes the
workqueues list walkable while holding the RCU read lock.

Note that pool_workqueues and pools are already sched-RCU protected.
For consistency, workqueues are also protected with sched-RCU.

While at it, reverse the order of the workqueues list so that a
workqueue created earlier comes earlier in the list.  The order isn't
functionally significant, but it makes the planned sysrq dump list the
system workqueues first.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 8603e1b3
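
For illustration, a minimal sketch of the kind of walk this change enables, assuming it lives in kernel/workqueue.c where the static workqueues list and struct workqueue_struct are visible; the function name show_all_workqueues_sketch() is hypothetical, and the sysrq dump itself is only planned here, not part of this commit.

static void show_all_workqueues_sketch(void)
{
	struct workqueue_struct *wq;

	/* sched-RCU read side, matching call_rcu_sched() in the free path */
	rcu_read_lock_sched();
	list_for_each_entry_rcu(wq, &workqueues, list)
		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
	rcu_read_unlock_sched();
}

Because the final kfree(wq) is deferred through call_rcu_sched(), such a reader never touches freed memory even if destroy_workqueue() runs concurrently.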
@@ -230,7 +230,7 @@ struct wq_device;
  */
 struct workqueue_struct {
 	struct list_head	pwqs;		/* WR: all pwqs of this wq */
-	struct list_head	list;		/* PL: list of all workqueues */
+	struct list_head	list;		/* PR: list of all workqueues */
 
 	struct mutex		mutex;		/* protects this wq */
 	int			work_color;	/* WQ: current work color */
@@ -257,6 +257,13 @@ struct workqueue_struct {
 #endif
 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
 
+	/*
+	 * Destruction of workqueue_struct is sched-RCU protected to allow
+	 * walking the workqueues list without grabbing wq_pool_mutex.
+	 * This is used to dump all workqueues from sysrq.
+	 */
+	struct rcu_head		rcu;
+
 	/* hot fields used during command issue, aligned to cacheline */
 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
@@ -288,7 +295,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 
-static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
+static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 
 /* the per-cpu worker pools */
@@ -3424,6 +3431,20 @@ static int init_worker_pool(struct worker_pool *pool)
 	return 0;
 }
 
+static void rcu_free_wq(struct rcu_head *rcu)
+{
+	struct workqueue_struct *wq =
+		container_of(rcu, struct workqueue_struct, rcu);
+
+	if (!(wq->flags & WQ_UNBOUND))
+		free_percpu(wq->cpu_pwqs);
+	else
+		free_workqueue_attrs(wq->unbound_attrs);
+
+	kfree(wq->rescuer);
+	kfree(wq);
+}
+
 static void rcu_free_pool(struct rcu_head *rcu)
 {
 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
@@ -3601,12 +3622,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 
 	/*
 	 * If we're the last pwq going away, @wq is already dead and no one
-	 * is gonna access it anymore.  Free it.
+	 * is gonna access it anymore.  Schedule RCU free.
 	 */
-	if (is_last) {
-		free_workqueue_attrs(wq->unbound_attrs);
-		kfree(wq);
-	}
+	if (is_last)
+		call_rcu_sched(&wq->rcu, rcu_free_wq);
 }
 
 /**
@@ -4143,7 +4162,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		pwq_adjust_max_active(pwq);
 	mutex_unlock(&wq->mutex);
 
-	list_add(&wq->list, &workqueues);
+	list_add_tail_rcu(&wq->list, &workqueues);
 
 	mutex_unlock(&wq_pool_mutex);
@@ -4199,24 +4218,20 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	 * flushing is complete in case freeze races us.
 	 */
 	mutex_lock(&wq_pool_mutex);
-	list_del_init(&wq->list);
+	list_del_rcu(&wq->list);
 	mutex_unlock(&wq_pool_mutex);
 
 	workqueue_sysfs_unregister(wq);
 
-	if (wq->rescuer) {
+	if (wq->rescuer)
 		kthread_stop(wq->rescuer->task);
-		kfree(wq->rescuer);
-		wq->rescuer = NULL;
-	}
 
 	if (!(wq->flags & WQ_UNBOUND)) {
 		/*
 		 * The base ref is never dropped on per-cpu pwqs.  Directly
-		 * free the pwqs and wq.
+		 * schedule RCU free.
 		 */
-		free_percpu(wq->cpu_pwqs);
-		kfree(wq);
+		call_rcu_sched(&wq->rcu, rcu_free_wq);
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point.  Directly
......