Commit 5a791ea4 authored by Linus Torvalds

Merge branch 'for-3.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'for-3.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: separate out drain_workqueue() from destroy_workqueue()
  workqueue: remove cancel_rearming_delayed_work[queue]()
parents 8209f53d 9c5a2ba7
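
This merge pulls in two workqueue changes for 3.1: the drain loop in destroy_workqueue() is split out into a new exported drain_workqueue(), and the long-deprecated cancel_rearming_delayed_work[queue]() wrappers are removed. As a rough sketch of the caller-visible effect (hypothetical driver code, not part of this commit; "example_wq" and the function names are invented), a workqueue can now be drained without being torn down:

	/* Illustration only: hypothetical driver teardown paths after this merge. */
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* created with alloc_workqueue() */

	static void example_quiesce(void)
	{
		/* New: wait until example_wq is empty but keep it usable. */
		drain_workqueue(example_wq);
	}

	static void example_exit(void)
	{
		/* destroy_workqueue() now drains internally before destruction. */
		destroy_workqueue(example_wq);
		example_wq = NULL;
	}
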
@@ -501,16 +501,6 @@ Who:	NeilBrown <neilb@suse.de>
 ----------------------------
-What:	cancel_rearming_delayed_work[queue]()
-When:	2.6.39
-Why:	The functions have been superceded by cancel_delayed_work_sync()
-	quite some time ago.  The conversion is trivial and there is no
-	in-kernel user left.
-Who:	Tejun Heo <tj@kernel.org>
-----------------------------
 What:	Legacy, non-standard chassis intrusion detection interface.
 When:	June 2011
 Why:	The adm9240, w83792d and w83793 hardware monitoring drivers have
......
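
The entry removed above notes that the conversion to cancel_delayed_work_sync() is trivial; in a hypothetical driver still using the old wrappers (invented names, shown only for illustration) it is a one-line change, since both wrappers were already inline calls to cancel_delayed_work_sync() that ignored the @wq argument:

	/* Before (wrappers removed by this merge): */
	cancel_rearming_delayed_workqueue(example_wq, &example_dwork);
	cancel_rearming_delayed_work(&example_dwork);

	/* After -- both collapse to the same call: */
	cancel_delayed_work_sync(&example_dwork);
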
@@ -255,7 +255,7 @@ enum {
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
-	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
 	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
@@ -355,6 +355,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
 extern void flush_workqueue(struct workqueue_struct *wq);
+extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 extern int schedule_work(struct work_struct *work);
@@ -412,21 +413,6 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
-/* Obsolete. use cancel_delayed_work_sync() */
-static inline __deprecated
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-					struct delayed_work *work)
-{
-	cancel_delayed_work_sync(work);
-}
-
-/* Obsolete. use cancel_delayed_work_sync() */
-static inline __deprecated
-void cancel_rearming_delayed_work(struct delayed_work *work)
-{
-	cancel_delayed_work_sync(work);
-}
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
......
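
With the declaration added above, drain_workqueue() sits next to flush_workqueue() as the stronger of the two guarantees: a flush waits for the work queued so far, while a drain flushes repeatedly until the queue is empty and, via the WQ_DRAINING flag, rejects new work queued from outside the workqueue in the meantime. A minimal sketch of choosing between them (hypothetical helper name, illustration only):

	/* Illustration only: pick the guarantee a hypothetical caller needs. */
	static void example_quiesce_wq(struct workqueue_struct *wq, bool empty_it)
	{
		if (empty_it)
			drain_workqueue(wq);	/* keep flushing until empty; chain queueing only */
		else
			flush_workqueue(wq);	/* wait for work queued so far to finish */
	}
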
@@ -221,7 +221,7 @@ typedef unsigned long mayday_mask_t;
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-	unsigned int		flags;		/* I: WQ_* flags */
+	unsigned int		flags;		/* W: WQ_* flags */
 	union {
 		struct cpu_workqueue_struct __percpu	*pcpu;
 		struct cpu_workqueue_struct		*single;
@@ -240,6 +240,7 @@ struct workqueue_struct {
 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
+	int			nr_drainers;	/* W: drain in progress */
 	int			saved_max_active; /* W: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
@@ -990,7 +991,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	debug_work_activate(work);
 	/* if dying, only works from the same workqueue are allowed */
-	if (unlikely(wq->flags & WQ_DYING) &&
+	if (unlikely(wq->flags & WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
@@ -2381,6 +2382,54 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty.  While draining is in progress,
+ * only chain queueing is allowed.  IOW, only currently pending or running
+ * work items on @wq can queue further work items on it.  @wq is flushed
+ * repeatedly until it becomes empty.  The number of flushing is detemined
+ * by the depth of chaining and should be relatively short.  Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+	unsigned int flush_cnt = 0;
+	unsigned int cpu;
+
+	/*
+	 * __queue_work() needs to test whether there are drainers, is much
+	 * hotter than drain_workqueue() and already looks at @wq->flags.
+	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
+	 */
+	spin_lock(&workqueue_lock);
+	if (!wq->nr_drainers++)
+		wq->flags |= WQ_DRAINING;
+	spin_unlock(&workqueue_lock);
+reflush:
+	flush_workqueue(wq);
+
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+				   wq->name, flush_cnt);
+		goto reflush;
+	}
+
+	spin_lock(&workqueue_lock);
+	if (!--wq->nr_drainers)
+		wq->flags &= ~WQ_DRAINING;
+	spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 			     bool wait_executing)
 {
@@ -3009,34 +3058,10 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
-	/*
-	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
-	 * set, only chain queueing is allowed.  IOW, only currently
-	 * pending or running work items on @wq can queue further work
-	 * items on it.  @wq is flushed repeatedly until it becomes empty.
-	 * The number of flushing is detemined by the depth of chaining and
-	 * should be relatively short.  Whine if it takes too long.
-	 */
-	wq->flags |= WQ_DYING;
-reflush:
-	flush_workqueue(wq);
-
-	for_each_cwq_cpu(cpu, wq) {
-		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
-			continue;
-
-		if (++flush_cnt == 10 ||
-		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-			printk(KERN_WARNING "workqueue %s: flush on "
-			       "destruction isn't complete after %u tries\n",
-			       wq->name, flush_cnt);
-		goto reflush;
-	}
+	/* drain it before proceeding with destruction */
+	drain_workqueue(wq);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
......
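
The drain_workqueue() comment in the hunk above permits "chain queueing" while WQ_DRAINING is set: only work items already pending or running on the workqueue may queue further items on it, and each level of chaining costs roughly one more flush_workqueue() pass. A minimal sketch of such a chain (hypothetical names, illustration only):

	/* Illustration: chained work that drain_workqueue() tolerates. */
	static struct workqueue_struct *example_wq;

	static void example_second_fn(struct work_struct *work)
	{
		/* final link of the chain; queues nothing further */
	}
	static DECLARE_WORK(example_second, example_second_fn);

	static void example_first_fn(struct work_struct *work)
	{
		/*
		 * Runs on example_wq, so is_chained_work() is true and the
		 * WARN_ON_ONCE() in __queue_work() does not reject this even
		 * while the queue is draining; drain_workqueue() simply
		 * flushes one more time to pick up example_second.
		 */
		queue_work(example_wq, &example_second);
	}
	static DECLARE_WORK(example_first, example_first_fn);
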