Commit fe99a4f4 authored by Julia Cartwright, committed by Thomas Gleixner

kthread: Convert worker lock to raw spinlock

In order to enable the queuing of kthread work items from hardirq context
even when PREEMPT_RT_FULL is enabled, convert the worker spin_lock to a
raw_spin_lock.

This is acceptable only because the work performed under the lock is
well-bounded and minimal.
Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: https://lkml.kernel.org/r/20190212162554.19779-1-bigeasy@linutronix.de
parent c89d92ed
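
For context, a minimal sketch of the usage pattern this change enables (not part of the commit; the demo_* names and module boilerplate are hypothetical): a driver queuing kthread work directly from its hardirq handler, which is only safe on PREEMPT_RT_FULL once the worker lock is a raw_spinlock_t.

/*
 * Hypothetical driver sketch, illustrating queuing kthread work from
 * hardirq context on PREEMPT_RT_FULL, where an ordinary spinlock_t is
 * a sleeping lock. All demo_* identifiers are made up for illustration.
 */
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/err.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
	/* Deferred, preemptible processing runs in the worker thread. */
}

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/*
	 * kthread_queue_work() only takes the worker's raw_spin_lock for a
	 * short, bounded critical section, so it does not sleep here even
	 * with PREEMPT_RT_FULL enabled.
	 */
	kthread_queue_work(demo_worker, &demo_work);
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	demo_worker = kthread_create_worker(0, "demo");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);
	kthread_init_work(&demo_work, demo_work_fn);
	return 0;
}
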
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int		flags;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct list_head	work_list;
 	struct list_head	delayed_work_list;
 	struct task_struct	*task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 		 * any queuing is blocked by setting the canceling counter.
 		 */
 		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
+		raw_spin_unlock_irqrestore(&worker->lock, *flags);
 		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
+		raw_spin_lock_irqsave(&worker->lock, *flags);
 		work->canceling--;
 	}
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }