Commit cb2c84b3 authored by Linus Torvalds

Merge tag 'wq-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "Nothing too interesting. One patch to remove spurious warning and
  others to address static checker warnings"

* tag 'wq-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Correct declaration of cpu_pwq in struct workqueue_struct
  workqueue: Fix spruious data race in __flush_work()
  workqueue: Remove incorrect "WARN_ON_ONCE(!list_empty(&worker->entry));" from dying worker
  workqueue: Fix UBSAN 'subtraction overflow' error in shift_and_mask()
  workqueue: doc: Fix function name, remove markers
parents 5bd6cf00 c4c8f369
Documentation/core-api/workqueue.rst
@@ -260,7 +260,7 @@ Some users depend on strict execution ordering where only one work item
 is in flight at any given time and the work items are processed in
 queueing order. While the combination of ``@max_active`` of 1 and
 ``WQ_UNBOUND`` used to achieve this behavior, this is no longer the
-case. Use ``alloc_ordered_queue()`` instead.
+case. Use alloc_ordered_workqueue() instead.
 
 
 Example Execution Scenarios
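For reference, a minimal sketch of the usage the documentation now recommends. The names my_ordered_wq, my_work_fn(), my_work and my_init() are made-up illustration names, not anything in this patch; alloc_ordered_workqueue(), DECLARE_WORK() and queue_work() are the regular workqueue APIs.

#include <linux/init.h>
#include <linux/workqueue.h>

/* Hypothetical example names; not part of this patch. */
static struct workqueue_struct *my_ordered_wq;

static void my_work_fn(struct work_struct *work)
{
	/* work items on an ordered workqueue run one at a time, in order */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	/*
	 * Old pattern, no longer guaranteed to be ordered:
	 *   alloc_workqueue("my_ordered_wq", WQ_UNBOUND, 1);
	 */
	my_ordered_wq = alloc_ordered_workqueue("my_ordered_wq", 0);
	if (!my_ordered_wq)
		return -ENOMEM;

	queue_work(my_ordered_wq, &my_work);
	return 0;
}

An ordered workqueue executes at most one work item at a time, in queueing order, which is the guarantee the old WQ_UNBOUND plus max_active==1 combination used to provide.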
kernel/workqueue.c
@@ -377,7 +377,7 @@ struct workqueue_struct {
 
 	/* hot fields used during command issue, aligned to cacheline */
 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
-	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
+	struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
 	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
 };
 
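As an aside on the declaration fix above: the sparse address-space annotations have to sit on the pointer level they describe. A stand-alone sketch, with the annotations stubbed to nothing so it compiles outside a kernel tree; the struct name wq_sketch is made up for illustration:

/* Stubs for illustration only; in the kernel these expand to sparse
 * address_space attributes when __CHECKER__ is defined. */
#define __rcu
#define __percpu

struct pool_workqueue;

struct wq_sketch {
	/*
	 * Read right to left: cpu_pwq is a per-CPU pointer (__percpu on the
	 * outer level); each per-CPU slot holds an RCU-protected pointer
	 * (__rcu on the inner level) to a struct pool_workqueue. The old
	 * form hung both annotations off the base type, so static checkers
	 * saw them on the wrong pointer level.
	 */
	struct pool_workqueue __rcu * __percpu *cpu_pwq;
};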
@@ -897,7 +897,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
 
 static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
 {
-	return (v >> shift) & ((1 << bits) - 1);
+	return (v >> shift) & ((1U << bits) - 1);
 }
 
 static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
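The 1U change above moves the mask computation into unsigned arithmetic. A minimal user-space illustration (a hypothetical demo, not kernel code) of why the signed form can trip UBSAN once bits reaches 31 on a 32-bit int:

#include <stdio.h>

/* Build with: cc -std=c11 -fsanitize=undefined mask_demo.c */

static unsigned long mask_signed(unsigned long v, unsigned int shift, unsigned int bits)
{
	/* 1 is a signed int: 1 << 31 is undefined behaviour for a 32-bit int,
	 * and UBSAN reports the shift and/or the following subtraction. */
	return (v >> shift) & ((1 << bits) - 1);
}

static unsigned long mask_unsigned(unsigned long v, unsigned int shift, unsigned int bits)
{
	/* unsigned arithmetic is well-defined (modular), so no report */
	return (v >> shift) & ((1U << bits) - 1);
}

int main(void)
{
	printf("%lx\n", mask_unsigned(~0UL, 0, 31));	/* prints 7fffffff */
	printf("%lx\n", mask_signed(~0UL, 0, 31));	/* triggers a UBSAN report */
	return 0;
}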
@@ -3351,7 +3351,6 @@ static int worker_thread(void *__worker)
 		set_pf_worker(false);
 
 		ida_free(&pool->worker_ida, worker->id);
-		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
 
@@ -4167,7 +4166,6 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 static bool __flush_work(struct work_struct *work, bool from_cancel)
 {
 	struct wq_barrier barr;
-	unsigned long data;
 
 	if (WARN_ON(!wq_online))
 		return false;
@@ -4185,29 +4183,35 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	 * was queued on a BH workqueue, we also know that it was running in the
 	 * BH context and thus can be busy-waited.
 	 */
-	data = *work_data_bits(work);
-	if (from_cancel &&
-	    !WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_BH)) {
-		/*
-		 * On RT, prevent a live lock when %current preempted soft
-		 * interrupt processing or prevents ksoftirqd from running by
-		 * keeping flipping BH. If the BH work item runs on a different
-		 * CPU then this has no effect other than doing the BH
-		 * disable/enable dance for nothing. This is copied from
-		 * kernel/softirq.c::tasklet_unlock_spin_wait().
-		 */
-		while (!try_wait_for_completion(&barr.done)) {
-			if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-				local_bh_disable();
-				local_bh_enable();
-			} else {
-				cpu_relax();
-			}
-		}
-	} else {
-		wait_for_completion(&barr.done);
-	}
-
+	if (from_cancel) {
+		unsigned long data = *work_data_bits(work);
+
+		if (!WARN_ON_ONCE(data & WORK_STRUCT_PWQ) &&
+		    (data & WORK_OFFQ_BH)) {
+			/*
+			 * On RT, prevent a live lock when %current preempted
+			 * soft interrupt processing or prevents ksoftirqd from
+			 * running by keeping flipping BH. If the BH work item
+			 * runs on a different CPU then this has no effect other
+			 * than doing the BH disable/enable dance for nothing.
+			 * This is copied from
+			 * kernel/softirq.c::tasklet_unlock_spin_wait().
+			 */
+			while (!try_wait_for_completion(&barr.done)) {
+				if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+					local_bh_disable();
+					local_bh_enable();
+				} else {
+					cpu_relax();
+				}
+			}
+			goto out_destroy;
+		}
+	}
+
+	wait_for_completion(&barr.done);
+
+out_destroy:
 	destroy_work_on_stack(&barr.work);
 	return true;
 }