Commit 72dd379e authored by Linus Torvalds

Merge branch 'for-4.15-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:

 - Lai's hotplug simplifications inadvertently fix a possible deadlock
   involving cpuset and workqueue

 - CPU isolation fix that had been reverted due to the changes in the
   housekeeping code, now resurrected

 - A trivial unused include removal

* 'for-4.15-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: remove unneeded kallsyms include
  workqueue/hotplug: remove the workaround in rebind_workers()
  workqueue/hotplug: simplify workqueue_offline_cpu()
  workqueue: respect isolated cpus when queueing an unbound work
  main: kernel_start: move housekeeping_init() before workqueue_init_early()
parents a83cb7e6 01dfee95
init/main.c

@@ -588,6 +588,12 @@ asmlinkage __visible void __init start_kernel(void)
 	local_irq_disable();
 	radix_tree_init();
 
+	/*
+	 * Set up housekeeping before setting up workqueues to allow the unbound
+	 * workqueue to take non-housekeeping into account.
+	 */
+	housekeeping_init();
+
 	/*
 	 * Allow workqueue creation and work item queueing/cancelling
 	 * early. Work item execution depends on kthreads and starts after
@@ -605,7 +611,6 @@ asmlinkage __visible void __init start_kernel(void)
 	early_irq_init();
 	init_IRQ();
 	tick_init();
-	housekeeping_init();
 	rcu_init_nohz();
 	init_timers();
 	hrtimers_init();
kernel/workqueue.c

@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,7 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
 
 #include "workqueue_internal.h"
@@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
 	mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running. Because wq_unbind_fn() releases
+	 * Sanity check nr_running. Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously. Check iff
 	 * unbind is not in progress.
@@ -4510,9 +4510,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
@@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool)
 	spin_lock_irq(&pool->lock);
 
-	/*
-	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-	 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
-	 * being reworked and this can go away in time.
-	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		spin_unlock_irq(&pool->lock);
-		return;
-	}
-
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
@@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu)
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
@@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
+	/*
+	 * Not excluding isolated cpus on purpose.
+	 * If the user wishes to include them, we allow that.
+	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
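Note: workqueue_set_unbound_cpumask() is the sink for the global unbound-workqueue cpumask knob; in this tree it is driven from the workqueue sysfs attribute (typically /sys/devices/virtual/workqueue/cpumask). That is why isolated CPUs are deliberately not masked out here: the boot-time default in the hunk below now excludes them, but an administrator who explicitly writes a mask containing isolated CPUs gets exactly that mask.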
@@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void)
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
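For context on the "workqueue: respect isolated cpus when queueing an unbound work" entry above: the queueing side follows the same idea as the wq_unbound_cpumask seeding in the final hunk, i.e. when no specific CPU is requested, unbound work should land on a housekeeping (non-isolated) CPU. The sketch below only illustrates that pattern against the 4.15 housekeeping API; the helper name pick_unbound_cpu() is hypothetical and is not the function the patch actually adds or modifies.

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/*
 * Illustrative sketch only: prefer the requested CPU if it is a
 * housekeeping CPU, otherwise fall back to any online housekeeping CPU.
 */
static int pick_unbound_cpu(int requested_cpu)
{
	const struct cpumask *hk_mask = housekeeping_cpumask(HK_FLAG_DOMAIN);

	if (requested_cpu < nr_cpu_ids &&
	    cpumask_test_cpu(requested_cpu, hk_mask))
		return requested_cpu;

	return cpumask_any_and(hk_mask, cpu_online_mask);
}

With isolcpus= on the kernel command line, housekeeping_cpumask(HK_FLAG_DOMAIN) excludes the isolated CPUs, so both the new wq_unbound_cpumask default and a helper along these lines keep unbound work off them unless the cpumask is explicitly overridden.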