Commit 4075409c authored by Linus Torvalds

Merge branch 'for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Nothing too interesting. An optimization to short-circuit noop cpumask
  updates, debug dump code reorg, and doc update"

* 'for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: doc: Call out the non-reentrance conditions
  workqueue: Introduce show_one_worker_pool and show_one_workqueue.
  workqueue: make sysfs of unbound kworker cpumask more clever
parents bba7d682 f9eaaa82
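
For context on the "short-circuit noop cpumask updates" item above: after this series, writing a mask to /sys/devices/virtual/workqueue/cpumask that is identical to the current wq_unbound_cpumask returns success without re-applying workqueue attributes (see the workqueue_set_unbound_cpumask() hunks near the end of this diff). The snippet below is a small illustrative userspace program, not part of the patch, that exercises exactly that no-op path by reading the current mask and writing it back unchanged (it needs root to write the sysfs file):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/virtual/workqueue/cpumask";
	char mask[4096];
	FILE *f;

	/* Read the current unbound-workqueue cpumask. */
	f = fopen(path, "r");
	if (!f || !fgets(mask, sizeof(mask), f)) {
		perror(path);
		return 1;
	}
	fclose(f);

	/*
	 * Write the same value back: with this series the kernel detects
	 * the no-op and returns success without rebinding any workqueue.
	 */
	f = fopen(path, "w");
	if (!f || fputs(mask, f) == EOF || fclose(f) == EOF) {
		perror(path);
		return 1;
	}

	printf("re-applied unchanged cpumask: %s", mask);
	return 0;
}
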
@@ -216,10 +216,6 @@ resources, scheduled and executed.
   This flag is meaningless for unbound wq.
 
-Note that the flag ``WQ_NON_REENTRANT`` no longer exists as all
-workqueues are now non-reentrant - any work item is guaranteed to be
-executed by at most one worker system-wide at any given time.
-
 ``max_active``
 --------------
@@ -391,6 +387,23 @@ the stack trace of the offending worker thread. ::
 The work item's function should be trivially visible in the stack
 trace.
 
+Non-reentrance Conditions
+=========================
+
+Workqueue guarantees that a work item cannot be re-entrant if the following
+conditions hold after a work item gets queued:
+
+1. The work function hasn't been changed.
+2. No one queues the work item to another workqueue.
+3. The work item hasn't been reinitiated.
+
+In other words, if the above conditions hold, the work item is guaranteed to be
+executed by at most one worker system-wide at any given time.
+
+Note that requeuing the work item (to the same queue) from within its own work
+function doesn't break these conditions, so it is safe to do. Otherwise, caution
+is required when breaking the conditions inside a work function.
+
 Kernel Inline Documentations Reference
 ======================================
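
To make the non-reentrance conditions added in the hunk above concrete: a work item that requeues itself to the same workqueue from its own work function satisfies all three conditions, so at most one CPU ever runs its callback at a time. The following is an illustrative, self-contained module sketch, not part of this patch; all names in it are made up:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

static void selfrequeue_fn(struct work_struct *work);
static DECLARE_WORK(selfrequeue_work, selfrequeue_fn);
static atomic_t iterations = ATOMIC_INIT(0);

static void selfrequeue_fn(struct work_struct *work)
{
	/*
	 * Requeuing to the same workqueue (system_wq via schedule_work())
	 * keeps all three conditions intact: the function is unchanged,
	 * no other workqueue is used, and the item is never reinitialized.
	 * Hence this callback is never run concurrently with itself.
	 */
	if (atomic_inc_return(&iterations) < 5)
		schedule_work(&selfrequeue_work);
}

static int __init selfrequeue_init(void)
{
	schedule_work(&selfrequeue_work);
	return 0;
}

static void __exit selfrequeue_exit(void)
{
	cancel_work_sync(&selfrequeue_work);
}

module_init(selfrequeue_init);
module_exit(selfrequeue_exit);
MODULE_LICENSE("GPL");

By contrast, queueing the same work item on a second workqueue, or reinitializing it with INIT_WORK() while it may still be pending, breaks the conditions and the single-worker guarantee no longer holds.
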
@@ -296,7 +296,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
 static void sysrq_handle_showstate(int key)
 {
 	show_state();
-	show_workqueue_state();
+	show_all_workqueues();
 }
 
 static const struct sysrq_key_op sysrq_showstate_op = {
 	.handler	= sysrq_handle_showstate,
@@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**
@@ -94,7 +94,7 @@ static int try_to_freeze_tasks(bool user_only)
 		       todo - wq_busy, wq_busy);
 
 		if (wq_busy)
-			show_workqueue_state();
+			show_all_workqueues();
 
 		if (!wakeup || pm_debug_messages_on) {
 			read_lock(&tasklist_lock);
@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 			raw_spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
 			mutex_unlock(&wq_pool_mutex);
-			show_workqueue_state();
+			show_one_workqueue(wq);
 			return;
 		}
 		raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
 }
 
-/**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
- */
-void show_workqueue_state(void)
-{
-	struct workqueue_struct *wq;
-	struct worker_pool *pool;
-	unsigned long flags;
-	int pi;
-
-	rcu_read_lock();
-
-	pr_info("Showing busy workqueues and worker pools:\n");
-
-	list_for_each_entry_rcu(wq, &workqueues, list) {
-		struct pool_workqueue *pwq;
-		bool idle = true;
-
-		for_each_pwq(pwq, wq) {
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				idle = false;
-				break;
-			}
-		}
-		if (idle)
-			continue;
-
-		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
-
-		for_each_pwq(pwq, wq) {
-			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
-				/*
-				 * Defer printing to avoid deadlocks in console
-				 * drivers that queue work while holding locks
-				 * also taken in their write paths.
-				 */
-				printk_deferred_enter();
-				show_pwq(pwq);
-				printk_deferred_exit();
-			}
-			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-			/*
-			 * We could be printing a lot from atomic context, e.g.
-			 * sysrq-t -> show_workqueue_state(). Avoid triggering
-			 * hard lockup.
-			 */
-			touch_nmi_watchdog();
-		}
-	}
-
-	for_each_pool(pool, pi) {
-		struct worker *worker;
-		bool first = true;
-
-		raw_spin_lock_irqsave(&pool->lock, flags);
-		if (pool->nr_workers == pool->nr_idle)
-			goto next_pool;
-		/*
-		 * Defer printing to avoid deadlocks in console drivers that
-		 * queue work while holding locks also taken in their write
-		 * paths.
-		 */
-		printk_deferred_enter();
-		pr_info("pool %d:", pool->id);
-		pr_cont_pool_info(pool);
-		pr_cont(" hung=%us workers=%d",
-			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-			pool->nr_workers);
-		if (pool->manager)
-			pr_cont(" manager: %d",
-				task_pid_nr(pool->manager->task));
-		list_for_each_entry(worker, &pool->idle_list, entry) {
-			pr_cont(" %s%d", first ? "idle: " : "",
-				task_pid_nr(worker->task));
-			first = false;
-		}
-		pr_cont("\n");
-		printk_deferred_exit();
-	next_pool:
-		raw_spin_unlock_irqrestore(&pool->lock, flags);
-		/*
-		 * We could be printing a lot from atomic context, e.g.
-		 * sysrq-t -> show_workqueue_state(). Avoid triggering
-		 * hard lockup.
-		 */
-		touch_nmi_watchdog();
-	}
-
-	rcu_read_unlock();
-}
+/**
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
+ */
+void show_one_workqueue(struct workqueue_struct *wq)
+{
+	struct pool_workqueue *pwq;
+	bool idle = true;
+	unsigned long flags;
+
+	for_each_pwq(pwq, wq) {
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+			idle = false;
+			break;
+		}
+	}
+	if (idle) /* Nothing to print for idle workqueue */
+		return;
+
+	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+	for_each_pwq(pwq, wq) {
+		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+			/*
+			 * Defer printing to avoid deadlocks in console
+			 * drivers that queue work while holding locks
+			 * also taken in their write paths.
+			 */
+			printk_deferred_enter();
+			show_pwq(pwq);
+			printk_deferred_exit();
+		}
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+		/*
+		 * We could be printing a lot from atomic context, e.g.
+		 * sysrq-t -> show_all_workqueues(). Avoid triggering
+		 * hard lockup.
+		 */
+		touch_nmi_watchdog();
+	}
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+	struct worker *worker;
+	bool first = true;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pool->lock, flags);
+	if (pool->nr_workers == pool->nr_idle)
+		goto next_pool;
+	/*
+	 * Defer printing to avoid deadlocks in console drivers that
+	 * queue work while holding locks also taken in their write
+	 * paths.
+	 */
+	printk_deferred_enter();
+	pr_info("pool %d:", pool->id);
+	pr_cont_pool_info(pool);
+	pr_cont(" hung=%us workers=%d",
+		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+		pool->nr_workers);
+	if (pool->manager)
+		pr_cont(" manager: %d",
+			task_pid_nr(pool->manager->task));
+	list_for_each_entry(worker, &pool->idle_list, entry) {
+		pr_cont(" %s%d", first ? "idle: " : "",
+			task_pid_nr(worker->task));
+		first = false;
+	}
+	pr_cont("\n");
+	printk_deferred_exit();
+next_pool:
+	raw_spin_unlock_irqrestore(&pool->lock, flags);
+	/*
+	 * We could be printing a lot from atomic context, e.g.
+	 * sysrq-t -> show_all_workqueues(). Avoid triggering
+	 * hard lockup.
+	 */
+	touch_nmi_watchdog();
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	struct worker_pool *pool;
+	int pi;
+
+	rcu_read_lock();
+
+	pr_info("Showing busy workqueues and worker pools:\n");
+
+	list_for_each_entry_rcu(wq, &workqueues, list)
+		show_one_workqueue(wq);
+
+	for_each_pool(pool, pi)
+		show_one_worker_pool(pool);
+
+	rcu_read_unlock();
+}
@@ -5384,9 +5404,6 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	int ret = -EINVAL;
 	cpumask_var_t saved_cpumask;
 
-	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
-		return -ENOMEM;
-
 	/*
 	 * Not excluding isolated cpus on purpose.
 	 * If the user wishes to include them, we allow that.
@@ -5394,6 +5411,15 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
+		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+			ret = 0;
+			goto out_unlock;
+		}
+
+		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
 
 		/* save the old wq_unbound_cpumask. */
 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
@@ -5406,10 +5432,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 		if (ret < 0)
 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
+		free_cpumask_var(saved_cpumask);
+out_unlock:
 		apply_wqattrs_unlock();
 	}
 
-	free_cpumask_var(saved_cpumask);
 	return ret;
 }
@@ -5869,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	rcu_read_unlock();
 
 	if (lockup_detected)
-		show_workqueue_state();
+		show_all_workqueues();
 
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);