Commit 75ccf595 authored by Tejun Heo

workqueue: prepare flush_workqueue() for dynamic creation and destruction of unbound pool_workqueues

Unbound pwqs (pool_workqueues) will be dynamically created and
destroyed with the scheduled support for unbound workqueues with
custom attributes.  This patch synchronizes pwq linking and unlinking
against flush_workqueue() so that its operation isn't disturbed by
pwqs coming and going.

Linking a pwq into and unlinking it from wq->pwqs is now also
protected by wq->flush_mutex, and a new pwq's work_color is
initialized to wq->work_color during linking.  This ensures that pwq
changes don't disturb a flush_workqueue() in progress and that the
new pwq's work coloring stays in sync with the rest of the workqueue.

Holding flush_mutex during unlinking isn't strictly necessary, but
it's simpler to do it anyway.
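
To illustrate the coloring invariant, here is a minimal userspace
model, not the kernel code; the struct layouts and the simplified
start_flush() are illustrative only, though WORK_NR_COLORS matches
the kernel's value:

```c
/*
 * Minimal userspace model of the pwq coloring invariant: the flusher
 * advances wq->work_color and then waits on the old color, so a pwq
 * linked under the same flush_mutex always starts at the color the
 * flusher currently hands out, never a stale one.
 */
#include <stdio.h>

#define WORK_NR_COLORS	15	/* matches the kernel's value */

struct pwq {
	int work_color;		/* color given to newly queued works */
};

struct wq {
	int work_color;		/* current color for incoming works */
};

/* models init_and_link_pwq(): copy the wq's color at link time */
static void link_pwq(struct wq *wq, struct pwq *pwq)
{
	pwq->work_color = wq->work_color;
}

/* models the color advance at the start of flush_workqueue() */
static void start_flush(struct wq *wq)
{
	wq->work_color = (wq->work_color + 1) % WORK_NR_COLORS;
}

int main(void)
{
	struct wq wq = { .work_color = 3 };
	struct pwq a, b;

	link_pwq(&wq, &a);	/* a starts at color 3 */
	start_flush(&wq);	/* flusher moves the wq to color 4 */
	link_pwq(&wq, &b);	/* b picks up color 4, not stale 3 */

	printf("wq=%d a=%d b=%d\n", wq.work_color, a.work_color, b.work_color);
	return 0;
}
```

Running it prints wq=4 a=3 b=4: the pwq linked mid-flush starts at the
flusher's current color rather than one the flusher is still waiting on.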
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent 8864b4e5
@@ -122,6 +122,9 @@ enum {
  * W: workqueue_lock protected.
  *
  * R: workqueue_lock protected for writes.  Sched-RCU protected for reads.
+ *
+ * FR: wq->flush_mutex and workqueue_lock protected for writes.  Sched-RCU
+ *     protected for reads.
  */
 
 /* struct worker is defined in workqueue_internal.h */
@@ -185,7 +188,7 @@ struct pool_workqueue {
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
-	struct list_head	pwqs_node;	/* R: node on wq->pwqs */
+	struct list_head	pwqs_node;	/* FR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* W: node on wq->maydays */
 
 	/*
@@ -214,7 +217,7 @@ struct wq_flusher {
 struct workqueue_struct {
 	unsigned int		flags;		/* W: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
-	struct list_head	pwqs;		/* R: all pwqs of this wq */
+	struct list_head	pwqs;		/* FR: all pwqs of this wq */
 	struct list_head	list;		/* W: list of all workqueues */
 
 	struct mutex		flush_mutex;	/* protects wq flushing */
@@ -3402,9 +3405,16 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
 		return;
 
+	/*
+	 * Unlink @pwq.  Synchronization against flush_mutex isn't strictly
+	 * necessary on release but do it anyway.  It's easier to verify
+	 * and consistent with the linking path.
+	 */
+	mutex_lock(&wq->flush_mutex);
 	spin_lock_irq(&workqueue_lock);
 	list_del_rcu(&pwq->pwqs_node);
 	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq->flush_mutex);
 
 	put_unbound_pool(pool);
 	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
@@ -3432,7 +3442,18 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
 
+	/*
+	 * Link @pwq and set the matching work_color.  This is synchronized
+	 * with flush_mutex to avoid confusing flush_workqueue().
+	 */
+	mutex_lock(&wq->flush_mutex);
+	spin_lock_irq(&workqueue_lock);
+	pwq->work_color = wq->work_color;
 	list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
+	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq->flush_mutex);
 }
 
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
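For reference, a sketch of the locking rules that the new FR
annotation encodes, as a kernel-context fragment that is not
compilable standalone; inspect() is a hypothetical helper:

```c
/*
 * FR rule for wq->pwqs: writers nest workqueue_lock inside
 * wq->flush_mutex; readers only need sched-RCU.
 */

/* writer side: link/unlink, as in the patch above */
mutex_lock(&wq->flush_mutex);
spin_lock_irq(&workqueue_lock);
list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
spin_unlock_irq(&workqueue_lock);
mutex_unlock(&wq->flush_mutex);

/* reader side: traversal is safe under sched-RCU alone */
rcu_read_lock_sched();
list_for_each_entry_rcu(pwq, &wq->pwqs, pwqs_node)
	inspect(pwq);	/* hypothetical helper */
rcu_read_unlock_sched();
```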