Commit b5927605 authored by Lai Jiangshan; committed by Tejun Heo

workqueue: remove pwq_lock which is no longer used

To simplify locking, the previous patches expanded wq->mutex to
protect all fields of each workqueue instance including the pwqs list
leaving pwq_lock without any user.  Remove the unused pwq_lock.

tj: Rebased on top of the current dev branch.  Updated description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent a357fc03
@@ -125,12 +125,9 @@ enum {
  *
  * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
  *
- * PW: pwq_lock protected.
- *
  * WQ: wq->mutex protected.
  *
- * WR: wq->mutex and pwq_lock protected for writes.  Sched-RCU protected
- *     for reads.
+ * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -257,7 +254,6 @@ struct workqueue_struct {
 static struct kmem_cache *pwq_cache;

 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
-static DEFINE_SPINLOCK(pwq_lock);	/* protects pool_workqueues */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */

 static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
@@ -300,8 +296,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 #define assert_rcu_or_wq_mutex(wq)					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq->mutex) ||		\
-			   lockdep_is_held(&pwq_lock),			\
+			   lockdep_is_held(&wq->mutex),			\
 			   "sched RCU or wq->mutex should be held")

 #ifdef CONFIG_LOCKDEP
@@ -3549,9 +3544,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	 * and consistent with the linking path.
 	 */
 	mutex_lock(&wq->mutex);
-	spin_lock_irq(&pwq_lock);
 	list_del_rcu(&pwq->pwqs_node);
-	spin_unlock_irq(&pwq_lock);
 	mutex_unlock(&wq->mutex);

 	put_unbound_pool(pool);
@@ -3635,9 +3628,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	pwq_adjust_max_active(pwq);

 	/* link in @pwq */
-	spin_lock_irq(&pwq_lock);
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
-	spin_unlock_irq(&pwq_lock);
 	mutex_unlock(&wq->mutex);
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment