Commit 30cdf249 authored by Tejun Heo

workqueue: add workqueue_struct->pwqs list

Add workqueue_struct->pwqs list and chain all pool_workqueues
belonging to a workqueue there.  This will be used to implement
generic pool_workqueue iteration and handle multiple pool_workqueues
for the scheduled unbound pools with custom attributes.

This patch doesn't introduce any visible behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent e904e6c2
@@ -169,6 +169,7 @@ struct pool_workqueue {
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
+	struct list_head	pwqs_node;	/* I: node on wq->pwqs */
 } __aligned(1 << WORK_STRUCT_FLAG_BITS);

 /*
@@ -212,6 +213,7 @@ struct workqueue_struct {
 		struct pool_workqueue		*single;
 		unsigned long			v;
 	} pool_wq;				/* I: pwq's */
+	struct list_head	pwqs;		/* I: all pwqs of this wq */
 	struct list_head	list;		/* W: list of all workqueues */

 	struct mutex		flush_mutex;	/* protects wq flushing */
@@ -3096,14 +3098,32 @@ int keventd_up(void)
 	return system_wq != NULL;
 }

-static int alloc_pwqs(struct workqueue_struct *wq)
+static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
-	if (!(wq->flags & WQ_UNBOUND))
+	int cpu;
+
+	if (!(wq->flags & WQ_UNBOUND)) {
 		wq->pool_wq.pcpu = alloc_percpu(struct pool_workqueue);
-	else
-		wq->pool_wq.single = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+		if (!wq->pool_wq.pcpu)
+			return -ENOMEM;
+
+		for_each_possible_cpu(cpu) {
+			struct pool_workqueue *pwq = get_pwq(cpu, wq);

-	return wq->pool_wq.v ? 0 : -ENOMEM;
+			list_add_tail(&pwq->pwqs_node, &wq->pwqs);
+		}
+	} else {
+		struct pool_workqueue *pwq;
+
+		pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+		if (!pwq)
+			return -ENOMEM;
+
+		wq->pool_wq.single = pwq;
+		list_add_tail(&pwq->pwqs_node, &wq->pwqs);
+	}
+
+	return 0;
 }

 static void free_pwqs(struct workqueue_struct *wq)
@@ -3165,13 +3185,14 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	wq->saved_max_active = max_active;
 	mutex_init(&wq->flush_mutex);
 	atomic_set(&wq->nr_pwqs_to_flush, 0);
+	INIT_LIST_HEAD(&wq->pwqs);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);

 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	INIT_LIST_HEAD(&wq->list);

-	if (alloc_pwqs(wq) < 0)
+	if (alloc_and_link_pwqs(wq) < 0)
 		goto err;

 	for_each_pwq_cpu(cpu, wq) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment