Commit 2a8ab0fb authored by Tejun Heo

Merge branch 'workqueue/for-5.16-fixes' into workqueue/for-5.17

for-5.16-fixes contains two subtle race conditions which were introduced by
scheduler side code cleanups. The branch didn't get pushed out, so merge
into for-5.17.
parents 84f91c62 45c753f5
...@@ -864,8 +864,17 @@ void wq_worker_running(struct task_struct *task) ...@@ -864,8 +864,17 @@ void wq_worker_running(struct task_struct *task)
if (!worker->sleeping) if (!worker->sleeping)
return; return;
/*
* If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
* and the nr_running increment below, we may ruin the nr_running reset
* and leave with an unexpected pool->nr_running == 1 on the newly unbound
* pool. Protect against such race.
*/
preempt_disable();
if (!(worker->flags & WORKER_NOT_RUNNING)) if (!(worker->flags & WORKER_NOT_RUNNING))
atomic_inc(&worker->pool->nr_running); atomic_inc(&worker->pool->nr_running);
preempt_enable();
worker->sleeping = 0; worker->sleeping = 0;
} }
...@@ -898,6 +907,16 @@ void wq_worker_sleeping(struct task_struct *task) ...@@ -898,6 +907,16 @@ void wq_worker_sleeping(struct task_struct *task)
worker->sleeping = 1; worker->sleeping = 1;
raw_spin_lock_irq(&pool->lock); raw_spin_lock_irq(&pool->lock);
/*
* Recheck in case unbind_workers() preempted us. We don't
* want to decrement nr_running after the worker is unbound
* and nr_running has been reset.
*/
if (worker->flags & WORKER_NOT_RUNNING) {
raw_spin_unlock_irq(&pool->lock);
return;
}
/* /*
* The counterpart of the following dec_and_test, implied mb, * The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work(). * worklist not empty test sequence is in insert_work().
...@@ -1526,7 +1545,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, ...@@ -1526,7 +1545,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* @work: work to queue * @work: work to queue
* *
* We queue the work to a specific CPU, the caller must ensure it * We queue the work to a specific CPU, the caller must ensure it
* can't go away. * can't go away. Callers that fail to ensure that the specified
* CPU cannot go away will execute on a randomly chosen CPU.
* *
* Return: %false if @work was already on a queue, %true otherwise. * Return: %false if @work was already on a queue, %true otherwise.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment