Commit b63dc123 authored by Linus Torvalds

Merge branch 'for-3.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fix from Tejun Heo:
 "Lai's patch to fix highly unlikely but still possible workqueue stall
  during CPU hotunplug."

* 'for-3.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: fix possible pool stall bug in wq_unbind_fn()
parents 35f8c769 eb283428
kernel/workqueue.c
@@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
 		spin_unlock_irq(&pool->lock);
 		mutex_unlock(&pool->assoc_mutex);
-	}
 
-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();
 
-	/*
-	 * Sched callbacks are disabled now.  Zap nr_running.  After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty.  Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
-		atomic_set(&pool->nr_running, 0);
+		/*
+		 * Sched callbacks are disabled now.  Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty.  This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
+		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls.  Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }
 
 /*
...
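The shape of the fix: once %WORKER_UNBOUND is visible and nr_running is zapped, concurrency management no longer wakes extra workers on its own, so each pool must explicitly kick an idle worker or pending work can stall indefinitely behind a blocked one. Below is a minimal user-space sketch of that zap-then-wake pattern using pthreads; all names here (toy_pool, toy_unbind, more_work) are hypothetical stand-ins for illustration, not the kernel's API.

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical stand-in for a worker pool; not the kernel's struct. */
struct toy_pool {
	pthread_mutex_t lock;		/* plays the role of pool->lock */
	pthread_cond_t	more_work;	/* idle workers sleep here */
	int		worklist;	/* number of pending work items */
	atomic_int	nr_running;	/* concurrency-managed running count */
};

/*
 * Analogue of the patched per-pool sequence: zap nr_running, then
 * explicitly wake a worker so pending work is not stranded.
 */
static void toy_unbind(struct toy_pool *pool)
{
	/* Concurrency management off: nr_running stays zero from now on. */
	atomic_store(&pool->nr_running, 0);

	/*
	 * Without this wake-up, an idle worker keeps sleeping even though
	 * the worklist is non-empty -- the stall the patch closes.
	 */
	pthread_mutex_lock(&pool->lock);
	if (pool->worklist > 0)
		pthread_cond_signal(&pool->more_work);
	pthread_mutex_unlock(&pool->lock);
}

The design point mirrors the commit: the wake-up happens under the pool lock, after nr_running is zeroed, so a worker observing the signal also observes the unmanaged state.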