Commit f02ae73a authored by Tejun Heo

workqueue: drop "std" from cpu_std_worker_pools and for_each_std_worker_pool()

All per-cpu pools are standard, so there's no need to use both "cpu"
and "std" and for_each_std_worker_pool() is confusing in that it can
be used only for per-cpu pools.

* s/cpu_std_worker_pools/cpu_worker_pools/

* s/for_each_std_worker_pool()/for_each_cpu_worker_pool()/
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent 7a62c2c8
...@@ -252,9 +252,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq); ...@@ -252,9 +252,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
lockdep_is_held(&workqueue_lock), \ lockdep_is_held(&workqueue_lock), \
"sched RCU or workqueue lock should be held") "sched RCU or workqueue lock should be held")
#define for_each_std_worker_pool(pool, cpu) \ #define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0]; \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
(pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
(pool)++) (pool)++)
#define for_each_busy_worker(worker, i, pool) \ #define for_each_busy_worker(worker, i, pool) \
...@@ -420,7 +420,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */ ...@@ -420,7 +420,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */
* POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set. * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
*/ */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
cpu_std_worker_pools); cpu_worker_pools);
/* /*
* idr of all pools. Modifications are protected by workqueue_lock. Read * idr of all pools. Modifications are protected by workqueue_lock. Read
...@@ -3342,7 +3342,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq) ...@@ -3342,7 +3342,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
struct pool_workqueue *pwq = struct pool_workqueue *pwq =
per_cpu_ptr(wq->cpu_pwqs, cpu); per_cpu_ptr(wq->cpu_pwqs, cpu);
struct worker_pool *cpu_pools = struct worker_pool *cpu_pools =
per_cpu(cpu_std_worker_pools, cpu); per_cpu(cpu_worker_pools, cpu);
pwq->pool = &cpu_pools[highpri]; pwq->pool = &cpu_pools[highpri];
list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
...@@ -3694,7 +3694,7 @@ static void wq_unbind_fn(struct work_struct *work) ...@@ -3694,7 +3694,7 @@ static void wq_unbind_fn(struct work_struct *work)
struct worker *worker; struct worker *worker;
int i; int i;
for_each_std_worker_pool(pool, cpu) { for_each_cpu_worker_pool(pool, cpu) {
WARN_ON_ONCE(cpu != smp_processor_id()); WARN_ON_ONCE(cpu != smp_processor_id());
mutex_lock(&pool->assoc_mutex); mutex_lock(&pool->assoc_mutex);
...@@ -3737,7 +3737,7 @@ static void wq_unbind_fn(struct work_struct *work) ...@@ -3737,7 +3737,7 @@ static void wq_unbind_fn(struct work_struct *work)
* unbound chain execution of pending work items if other workers * unbound chain execution of pending work items if other workers
* didn't already. * didn't already.
*/ */
for_each_std_worker_pool(pool, cpu) for_each_cpu_worker_pool(pool, cpu)
atomic_set(&pool->nr_running, 0); atomic_set(&pool->nr_running, 0);
} }
...@@ -3754,7 +3754,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, ...@@ -3754,7 +3754,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
switch (action & ~CPU_TASKS_FROZEN) { switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE: case CPU_UP_PREPARE:
for_each_std_worker_pool(pool, cpu) { for_each_cpu_worker_pool(pool, cpu) {
struct worker *worker; struct worker *worker;
if (pool->nr_workers) if (pool->nr_workers)
...@@ -3772,7 +3772,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, ...@@ -3772,7 +3772,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
case CPU_DOWN_FAILED: case CPU_DOWN_FAILED:
case CPU_ONLINE: case CPU_ONLINE:
for_each_std_worker_pool(pool, cpu) { for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&pool->assoc_mutex); mutex_lock(&pool->assoc_mutex);
spin_lock_irq(&pool->lock); spin_lock_irq(&pool->lock);
...@@ -4012,7 +4012,7 @@ static int __init init_workqueues(void) ...@@ -4012,7 +4012,7 @@ static int __init init_workqueues(void)
struct worker_pool *pool; struct worker_pool *pool;
i = 0; i = 0;
for_each_std_worker_pool(pool, cpu) { for_each_cpu_worker_pool(pool, cpu) {
BUG_ON(init_worker_pool(pool)); BUG_ON(init_worker_pool(pool));
pool->cpu = cpu; pool->cpu = cpu;
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
...@@ -4027,7 +4027,7 @@ static int __init init_workqueues(void) ...@@ -4027,7 +4027,7 @@ static int __init init_workqueues(void)
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
struct worker_pool *pool; struct worker_pool *pool;
for_each_std_worker_pool(pool, cpu) { for_each_cpu_worker_pool(pool, cpu) {
struct worker *worker; struct worker *worker;
pool->flags &= ~POOL_DISASSOCIATED; pool->flags &= ~POOL_DISASSOCIATED;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment