Commit 23c97060 authored by Linus Torvalds

Merge branch 'for-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Only three commits, of which two are trivial.

  The non-trivial change is Thomas's patch to switch workqueue from
  sched RCU to the regular one. The use of sched RCU is mostly historic and
  doesn't really buy us anything noticeable"

* 'for-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Use normal rcu
  kernel/workqueue: Document wq_worker_last_func() argument
  kernel/workqueue: Use __printf markup to silence compiler in function 'alloc_workqueue'
parents b1e76c3d 24acfb71
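
The non-trivial change is mechanical: every sched-RCU read-side primitive (rcu_read_lock_sched(), rcu_read_unlock_sched(), rcu_read_lock_sched_held()) becomes its plain RCU counterpart, and a few paths that previously relied on disabled interrupts or preemption for protection now take an explicit rcu_read_lock(). Since the RCU flavor consolidation (around v4.20), both reader types are waited on by the same grace period, which is why the sched-RCU variant "doesn't really buy us anything noticeable". As a minimal sketch of the before/after reader pattern, using hypothetical names (example_pool, read_pool_id_old/new, put_example_pool) that are not taken from the patch:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_pool {
            int id;
            struct rcu_head rcu;
    };

    static struct example_pool __rcu *global_pool;

    /* Before: a sched-RCU reader, protected only while preemption is off. */
    static int read_pool_id_old(void)
    {
            struct example_pool *p;
            int id = -1;

            rcu_read_lock_sched();
            p = rcu_dereference_sched(global_pool);
            if (p)
                    id = p->id;
            rcu_read_unlock_sched();
            return id;
    }

    /* After: a normal RCU read-side critical section, as this patch uses. */
    static int read_pool_id_new(void)
    {
            struct example_pool *p;
            int id = -1;

            rcu_read_lock();
            p = rcu_dereference(global_pool);
            if (p)
                    id = p->id;
            rcu_read_unlock();
            return id;
    }

    /* Writer side keeps the same shape: defer the free to an RCU callback. */
    static void example_rcu_free(struct rcu_head *rcu)
    {
            kfree(container_of(rcu, struct example_pool, rcu));
    }

    static void put_example_pool(struct example_pool *pool)
    {
            call_rcu(&pool->rcu, example_rcu_free);
    }

The writer side in workqueue.c already used call_rcu(), as the put_unbound_pool() hunk below shows, so the patch mostly touches readers, comments, and the lockdep assertion macros.
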
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -127,16 +127,16 @@ enum {
  *
  * PL: wq_pool_mutex protected.
  *
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
  *
  * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
  *
  * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
- *      sched-RCU for reads.
+ *      RCU for reads.
  *
  * WQ: wq->mutex protected.
  *
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ * WR: wq->mutex protected for writes. RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -183,7 +183,7 @@ struct worker_pool {
         atomic_t nr_running ____cacheline_aligned_in_smp;
 
         /*
-         * Destruction of pool is sched-RCU protected to allow dereferences
+         * Destruction of pool is RCU protected to allow dereferences
          * from get_work_pool().
          */
         struct rcu_head rcu;
@@ -212,7 +212,7 @@ struct pool_workqueue {
         /*
          * Release of unbound pwq is punted to system_wq. See put_pwq()
          * and pwq_unbound_release_workfn() for details. pool_workqueue
-         * itself is also sched-RCU protected so that the first pwq can be
+         * itself is also RCU protected so that the first pwq can be
          * determined without grabbing wq->mutex.
          */
         struct work_struct unbound_release_work;
@@ -266,8 +266,8 @@ struct workqueue_struct {
         char name[WQ_NAME_LEN]; /* I: workqueue name */
 
         /*
-         * Destruction of workqueue_struct is sched-RCU protected to allow
-         * walking the workqueues list without grabbing wq_pool_mutex.
+         * Destruction of workqueue_struct is RCU protected to allow walking
+         * the workqueues list without grabbing wq_pool_mutex.
          * This is used to dump all workqueues from sysrq.
          */
         struct rcu_head rcu;
@@ -359,20 +359,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 #include <trace/events/workqueue.h>
 
 #define assert_rcu_or_pool_mutex() \
-        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                          !lockdep_is_held(&wq_pool_mutex), \
-                         "sched RCU or wq_pool_mutex should be held")
+                         "RCU or wq_pool_mutex should be held")
 
 #define assert_rcu_or_wq_mutex(wq) \
-        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                          !lockdep_is_held(&wq->mutex), \
-                         "sched RCU or wq->mutex should be held")
+                         "RCU or wq->mutex should be held")
 
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
-        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                          !lockdep_is_held(&wq->mutex) && \
                          !lockdep_is_held(&wq_pool_mutex), \
-                         "sched RCU, wq->mutex or wq_pool_mutex should be held")
+                         "RCU, wq->mutex or wq_pool_mutex should be held")
 
 #define for_each_cpu_worker_pool(pool, cpu) \
         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
@@ -384,7 +384,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @pool: iteration cursor
  * @pi: integer used for iteration
  *
- * This must be called either with wq_pool_mutex held or sched RCU read
+ * This must be called either with wq_pool_mutex held or RCU read
  * locked. If the pool needs to be used beyond the locking in effect, the
  * caller is responsible for guaranteeing that the pool stays online.
  *
@@ -416,7 +416,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @pwq: iteration cursor
  * @wq: the target workqueue
  *
- * This must be called either with wq->mutex held or sched RCU read locked.
+ * This must be called either with wq->mutex held or RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  *
@@ -552,7 +552,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * @wq: the target workqueue
  * @node: the node ID
  *
- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
  * read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
@@ -696,8 +696,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * @work: the work item of interest
  *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under wq_pool_mutex or with preemption disabled.
+ * access under RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
  *
  * All fields of the returned pool are accessible as long as the above
  * mentioned locking is in effect. If the returned pool needs to be used
@@ -907,6 +907,7 @@ void wq_worker_sleeping(struct task_struct *task)
 /**
  * wq_worker_last_func - retrieve worker's last work function
+ * @task: Task to retrieve last work function of.
  *
  * Determine the last function a worker executed. This is called from
  * the scheduler to get a worker's last known identity.
@@ -1126,7 +1127,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
 {
         if (pwq) {
                 /*
-                 * As both pwqs and pools are sched-RCU protected, the
+                 * As both pwqs and pools are RCU protected, the
                  * following lock operations are safe.
                  */
                 spin_lock_irq(&pwq->pool->lock);
@@ -1254,6 +1255,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                 return 0;
 
+        rcu_read_lock();
         /*
          * The queueing is in progress, or it is already queued. Try to
          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1292,10 +1294,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                 set_work_pool_and_keep_pending(work, pool->id);
 
                 spin_unlock(&pool->lock);
+                rcu_read_unlock();
                 return 1;
         }
         spin_unlock(&pool->lock);
 fail:
+        rcu_read_unlock();
         local_irq_restore(*flags);
         if (work_is_canceling(work))
                 return -ENOENT;
@@ -1409,6 +1413,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         if (unlikely(wq->flags & __WQ_DRAINING) &&
             WARN_ON_ONCE(!is_chained_work(wq)))
                 return;
+        rcu_read_lock();
 retry:
         if (req_cpu == WORK_CPU_UNBOUND)
                 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
@@ -1465,10 +1470,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         /* pwq determined, queue */
         trace_workqueue_queue_work(req_cpu, pwq, work);
 
-        if (WARN_ON(!list_empty(&work->entry))) {
-                spin_unlock(&pwq->pool->lock);
-                return;
-        }
+        if (WARN_ON(!list_empty(&work->entry)))
+                goto out;
 
         pwq->nr_in_flight[pwq->work_color]++;
         work_flags = work_color_to_flags(pwq->work_color);
@@ -1486,7 +1489,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         insert_work(pwq, work, worklist, work_flags);
 
+out:
         spin_unlock(&pwq->pool->lock);
+        rcu_read_unlock();
 }
 
 /**
@@ -2968,14 +2973,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
         might_sleep();
 
-        local_irq_disable();
+        rcu_read_lock();
         pool = get_work_pool(work);
         if (!pool) {
-                local_irq_enable();
+                rcu_read_unlock();
                 return false;
         }
 
-        spin_lock(&pool->lock);
+        spin_lock_irq(&pool->lock);
         /* see the comment in try_to_grab_pending() with the same code */
         pwq = get_work_pwq(work);
         if (pwq) {
@@ -3007,10 +3012,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                 lock_map_acquire(&pwq->wq->lockdep_map);
                 lock_map_release(&pwq->wq->lockdep_map);
         }
+        rcu_read_unlock();
         return true;
 already_gone:
         spin_unlock_irq(&pool->lock);
+        rcu_read_unlock();
         return false;
 }
@@ -3497,7 +3503,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
  *
- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
 * safe manner. get_unbound_pool() calls this function on its failure path
 * and this function should be able to release pools which went through,
 * successfully or not, init_worker_pool().
@@ -3551,7 +3557,7 @@ static void put_unbound_pool(struct worker_pool *pool)
         del_timer_sync(&pool->idle_timer);
         del_timer_sync(&pool->mayday_timer);
 
-        /* sched-RCU protected to allow dereferences from get_work_pool() */
+        /* RCU protected to allow dereferences from get_work_pool() */
         call_rcu(&pool->rcu, rcu_free_pool);
 }
@@ -4202,6 +4208,7 @@ static int init_rescuer(struct workqueue_struct *wq)
         return 0;
 }
 
+__printf(1, 4)
 struct workqueue_struct *alloc_workqueue(const char *fmt,
                                          unsigned int flags,
                                          int max_active, ...)
@@ -4465,7 +4472,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
         struct pool_workqueue *pwq;
         bool ret;
 
-        rcu_read_lock_sched();
+        rcu_read_lock();
+        preempt_disable();
 
         if (cpu == WORK_CPU_UNBOUND)
                 cpu = smp_processor_id();
@@ -4476,7 +4484,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 
         ret = !list_empty(&pwq->delayed_works);
-        rcu_read_unlock_sched();
+        preempt_enable();
+        rcu_read_unlock();
 
         return ret;
 }
@@ -4502,15 +4511,15 @@ unsigned int work_busy(struct work_struct *work)
         if (work_pending(work))
                 ret |= WORK_BUSY_PENDING;
 
-        local_irq_save(flags);
+        rcu_read_lock();
         pool = get_work_pool(work);
         if (pool) {
-                spin_lock(&pool->lock);
+                spin_lock_irqsave(&pool->lock, flags);
                 if (find_worker_executing_work(pool, work))
                         ret |= WORK_BUSY_RUNNING;
-                spin_unlock(&pool->lock);
+                spin_unlock_irqrestore(&pool->lock, flags);
         }
-        local_irq_restore(flags);
+        rcu_read_unlock();
 
         return ret;
 }
@@ -4694,7 +4703,7 @@ void show_workqueue_state(void)
         unsigned long flags;
         int pi;
 
-        rcu_read_lock_sched();
+        rcu_read_lock();
 
         pr_info("Showing busy workqueues and worker pools:\n");
 
@@ -4759,7 +4768,7 @@ void show_workqueue_state(void)
                 touch_nmi_watchdog();
         }
 
-        rcu_read_unlock_sched();
+        rcu_read_unlock();
 }
 
 /* used to show worker information through /proc/PID/{comm,stat,status} */
@@ -5146,16 +5155,16 @@ bool freeze_workqueues_busy(void)
                  * nr_active is monotonically decreasing. It's safe
                  * to peek without lock.
                  */
-                rcu_read_lock_sched();
+                rcu_read_lock();
                 for_each_pwq(pwq, wq) {
                         WARN_ON_ONCE(pwq->nr_active < 0);
                         if (pwq->nr_active) {
                                 busy = true;
-                                rcu_read_unlock_sched();
+                                rcu_read_unlock();
                                 goto out_unlock;
                         }
                 }
-                rcu_read_unlock_sched();
+                rcu_read_unlock();
         }
 out_unlock:
         mutex_unlock(&wq_pool_mutex);
@@ -5350,7 +5359,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
         const char *delim = "";
         int node, written = 0;
 
-        rcu_read_lock_sched();
+        get_online_cpus();
+        rcu_read_lock();
         for_each_node(node) {
                 written += scnprintf(buf + written, PAGE_SIZE - written,
                                      "%s%d:%d", delim, node,
@@ -5358,7 +5368,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
                 delim = " ";
         }
         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
-        rcu_read_unlock_sched();
+        rcu_read_unlock();
+        put_online_cpus();
 
         return written;
 }
...
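
A note on the __printf(1, 4) hunk above: the macro expands to the compiler's format(printf, 1, 4) attribute, marking parameter 1 of alloc_workqueue() as the format string and parameter 4 as the first variadic argument (parameters 2 and 3 are flags and max_active), so format strings and their arguments are type-checked at build time; the "silence compiler" in the commit title presumably refers to a missing-format-attribute style warning on the definition. A small illustrative call site, with a made-up wrapper and workqueue name:

    #include <linux/workqueue.h>

    /* Hypothetical helper, not from this commit. */
    static struct workqueue_struct *example_create_wq(int node_id)
    {
            /*
             * With the format attribute in place, the compiler checks "%d"
             * against node_id; passing, say, a pointer here would now raise
             * a -Wformat warning instead of going unnoticed.
             */
            return alloc_workqueue("example-wq-%d", WQ_UNBOUND, 1, node_id);
    }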