Commit a0a1a5fd authored by Tejun Heo

workqueue: reimplement workqueue freeze using max_active

Currently, workqueue freezing is implemented by marking the worker
freezeable and calling try_to_freeze() from the dispatch loop.
Reimplement it using cwq->max_active so that the workqueue is frozen
instead of the worker.

* workqueue_struct->saved_max_active is added which stores the
  specified max_active on initialization.

* On freeze, all cwq->max_active's are quenched to zero.  Freezing is
  complete when nr_active on all cwqs reaches zero.

* On thaw, all cwq->max_active's are restored to wq->saved_max_active
  and the worklist is repopulated.

This new implementation allows having a single shared pool of workers
per CPU.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 1e19ffc6
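
[Editor's note] The mechanism is easiest to see in isolation. Below is a
minimal userspace sketch (not the kernel code; the work lists are reduced
to counters) of how quenching max_active to zero parks newly queued works
on delayed_works, lets nr_active drain to zero, and how thawing restores
the limit and reactivates the parked works:

    #include <stdbool.h>
    #include <stdio.h>

    struct cwq {
            int nr_active;          /* works on the worklist */
            int nr_delayed;         /* stand-in for the delayed_works list */
            int max_active;         /* quenched to 0 while frozen */
            int saved_max_active;   /* limit given at workqueue creation */
    };

    /* queueing: a work becomes active only while nr_active < max_active */
    static void queue_work(struct cwq *cwq)
    {
            if (cwq->nr_active < cwq->max_active)
                    cwq->nr_active++;       /* goes straight to the worklist */
            else
                    cwq->nr_delayed++;      /* parked on delayed_works */
    }

    static void freeze_begin(struct cwq *cwq)
    {
            cwq->max_active = 0;            /* nothing new activates */
    }

    static bool freeze_busy(struct cwq *cwq)
    {
            return cwq->nr_active != 0;     /* frozen once in-flight works drain */
    }

    static void thaw(struct cwq *cwq)
    {
            cwq->max_active = cwq->saved_max_active;
            /* the cwq_activate_first_delayed() loop, in miniature */
            while (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
                    cwq->nr_delayed--;
                    cwq->nr_active++;
            }
    }

    int main(void)
    {
            struct cwq cwq = { .max_active = 2, .saved_max_active = 2 };

            queue_work(&cwq);               /* active */
            freeze_begin(&cwq);
            queue_work(&cwq);               /* parked: max_active == 0 */
            printf("busy: %d\n", freeze_busy(&cwq));        /* 1: in flight */
            cwq.nr_active = 0;              /* pretend the worker finished */
            printf("busy: %d\n", freeze_busy(&cwq));        /* 0: complete */
            thaw(&cwq);
            printf("active after thaw: %d\n", cwq.nr_active); /* 1 */
            return 0;
    }

Gating at the queueing step, instead of freezing each worker, is what lets
the patch drop the freezer hooks from worker_thread() below and clears the
way for a shared per-CPU worker pool.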
include/linux/workqueue.h

@@ -340,4 +340,11 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 #else
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_FREEZER
+extern void freeze_workqueues_begin(void);
+extern bool freeze_workqueues_busy(void);
+extern void thaw_workqueues(void);
+#endif /* CONFIG_FREEZER */
+
 #endif
kernel/power/process.c

@@ -15,6 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
 #include <linux/delay.h>
+#include <linux/workqueue.h>
 
 /*
  * Timeout for stopping processes
@@ -35,6 +36,7 @@ static int try_to_freeze_tasks(bool sig_only)
 	struct task_struct *g, *p;
 	unsigned long end_time;
 	unsigned int todo;
+	bool wq_busy = false;
 	struct timeval start, end;
 	u64 elapsed_csecs64;
 	unsigned int elapsed_csecs;
@@ -42,6 +44,10 @@ static int try_to_freeze_tasks(bool sig_only)
 	do_gettimeofday(&start);
 	end_time = jiffies + TIMEOUT;
+
+	if (!sig_only)
+		freeze_workqueues_begin();
+
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
@@ -63,6 +69,12 @@ static int try_to_freeze_tasks(bool sig_only)
 				todo++;
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
+
+	if (!sig_only) {
+		wq_busy = freeze_workqueues_busy();
+		todo += wq_busy;
+	}
+
 	if (!todo || time_after(jiffies, end_time))
 		break;
@@ -86,8 +98,12 @@ static int try_to_freeze_tasks(bool sig_only)
 		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
-		       "(%d tasks refusing to freeze):\n",
-		       elapsed_csecs / 100, elapsed_csecs % 100, todo);
+		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+		       elapsed_csecs / 100, elapsed_csecs % 100,
+		       todo - wq_busy, wq_busy);
+
+		thaw_workqueues();
+
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			task_lock(p);
@@ -157,6 +173,7 @@ void thaw_processes(void)
 	oom_killer_enable();
 
 	printk("Restarting tasks ... ");
+	thaw_workqueues();
 	thaw_tasks(true);
 	thaw_tasks(false);
 	schedule();
kernel/workqueue.c

@@ -78,7 +78,7 @@ struct cpu_workqueue_struct {
 	int			nr_in_flight[WORK_NR_COLORS];
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
-	int			max_active;	/* I: max active works */
+	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
 };
@@ -108,6 +108,7 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
+	int			saved_max_active; /* I: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
@@ -228,6 +229,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 static DEFINE_PER_CPU(struct ida, worker_ida);
+static bool workqueue_freezing;		/* W: have wqs started freezing? */
 
 static int worker_thread(void *__worker);
@@ -745,19 +747,13 @@ static int worker_thread(void *__worker)
 	struct cpu_workqueue_struct *cwq = worker->cwq;
 	DEFINE_WAIT(wait);
 
-	if (cwq->wq->flags & WQ_FREEZEABLE)
-		set_freezable();
-
 	for (;;) {
 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-		if (!freezing(current) &&
-		    !kthread_should_stop() &&
+		if (!kthread_should_stop() &&
 		    list_empty(&cwq->worklist))
 			schedule();
 		finish_wait(&cwq->more_work, &wait);
 
-		try_to_freeze();
-
 		if (kthread_should_stop())
 			break;
@@ -1553,6 +1549,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		goto err;
 
 	wq->flags = flags;
+	wq->saved_max_active = max_active;
 	mutex_init(&wq->flush_mutex);
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
@@ -1591,8 +1588,19 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		failed = true;
 	}
 
+	/*
+	 * workqueue_lock protects global freeze state and workqueues
+	 * list.  Grab it, set max_active accordingly and add the new
+	 * workqueue to workqueues list.
+	 */
 	spin_lock(&workqueue_lock);
+
+	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+		for_each_possible_cpu(cpu)
+			get_cwq(cpu, wq)->max_active = 0;
+
 	list_add(&wq->list, &workqueues);
+
 	spin_unlock(&workqueue_lock);
 
 	cpu_maps_update_done();
@@ -1621,14 +1629,18 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	int cpu;
 
+	flush_workqueue(wq);
+
+	/*
+	 * wq list is used to freeze wq, remove from list after
+	 * flushing is complete in case freeze races us.
+	 */
 	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 	cpu_maps_update_done();
 
-	flush_workqueue(wq);
-
 	for_each_possible_cpu(cpu) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		int i;
@@ -1722,6 +1734,137 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 EXPORT_SYMBOL_GPL(work_on_cpu);
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_FREEZER
+
+/**
+ * freeze_workqueues_begin - begin freezing workqueues
+ *
+ * Start freezing workqueues.  After this function returns, all
+ * freezeable workqueues will queue new works to their delayed_works
+ * list instead of the cwq worklist.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void freeze_workqueues_begin(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(workqueue_freezing);
+	workqueue_freezing = true;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			spin_lock_irq(&cwq->lock);
+
+			if (wq->flags & WQ_FREEZEABLE)
+				cwq->max_active = 0;
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	spin_unlock(&workqueue_lock);
+}
+
+/**
+ * freeze_workqueues_busy - are freezeable workqueues still busy?
+ *
+ * Check whether freezing is complete.  This function must be called
+ * between freeze_workqueues_begin() and thaw_workqueues().
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock.
+ *
+ * RETURNS:
+ * %true if some freezeable workqueues are still busy.  %false if
+ * freezing is complete.
+ */
+bool freeze_workqueues_busy(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+	bool busy = false;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(!workqueue_freezing);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * nr_active is monotonically decreasing.  It's safe
+		 * to peek without lock.
+		 */
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			BUG_ON(cwq->nr_active < 0);
+			if (cwq->nr_active) {
+				busy = true;
+				goto out_unlock;
+			}
+		}
+	}
+out_unlock:
+	spin_unlock(&workqueue_lock);
+	return busy;
+}
+
+/**
+ * thaw_workqueues - thaw workqueues
+ *
+ * Thaw workqueues.  Normal queueing is restored and all collected
+ * frozen works are transferred to their respective cwq worklists.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void thaw_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	if (!workqueue_freezing)
+		goto out_unlock;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			spin_lock_irq(&cwq->lock);
+
+			/* restore max_active and repopulate worklist */
+			cwq->max_active = wq->saved_max_active;
+
+			while (!list_empty(&cwq->delayed_works) &&
+			       cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+
+			wake_up(&cwq->more_work);
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	workqueue_freezing = false;
+out_unlock:
+	spin_unlock(&workqueue_lock);
+}
+#endif /* CONFIG_FREEZER */
+
 void __init init_workqueues(void)
 {
 	unsigned int cpu;
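
[Editor's note] For reference, the call protocol that the kernel/power/process.c
hunks implement can be condensed as follows. This is a sketch only: the
task-freezing loop body and timeout handling are elided, and the function
name is invented for illustration.

    /*
     * Sketch of how try_to_freeze_tasks() drives the three new entry
     * points; condensed from the process.c hunks above.
     */
    static int freeze_everything_sketch(bool sig_only)
    {
            bool wq_busy = false;
            unsigned int todo;

            if (!sig_only)
                    freeze_workqueues_begin();  /* quench all cwq->max_active */

            do {
                    todo = 0;
                    /* ... try to freeze tasks, counting holdouts into todo ... */

                    if (!sig_only) {
                            wq_busy = freeze_workqueues_busy(); /* nr_active left? */
                            todo += wq_busy;
                    }
            } while (todo /* && not timed out */);

            if (todo)
                    thaw_workqueues();          /* failure: undo the quench */

            return todo ? -EBUSY : 0;
    }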