Commit ec433f0c authored by Peter Zijlstra, committed by Paul E. McKenney

softirq,rcu: Inform RCU of irq_exit() activity

The rcu_read_unlock_special() function relies on in_irq() to exclude
scheduler activity from interrupt level.  This fails because irq_exit()
can invoke the scheduler after clearing the preempt_count() bits that
in_irq() uses to determine that it is at interrupt level.  This situation
can result in failures as follows:

 $task			IRQ		SoftIRQ

 rcu_read_lock()

 /* do stuff */

 <preempt> |= UNLOCK_BLOCKED

 rcu_read_unlock()
   --t->rcu_read_lock_nesting

			irq_enter();
			/* do stuff, don't use RCU */
			irq_exit();
			  sub_preempt_count(IRQ_EXIT_OFFSET);
			  invoke_softirq()

					ttwu();
					  spin_lock_irq(&pi->lock)
					  rcu_read_lock();
					  /* do stuff */
					  rcu_read_unlock();
					    rcu_read_unlock_special()
					      rcu_report_exp_rnp()
					        ttwu()
					          spin_lock_irq(&pi->lock) /* deadlock */

   rcu_read_unlock_special(t);

Ed can trigger this easily because invoke_softirq() immediately
does a ttwu() of ksoftirqd/# instead of doing the in-place softirq work
first, but even without that the above can happen.
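[ Editor's note: the key to the scenario above is the preempt_count()
  bit layout.  in_irq() tests the hardirq bits, which irq_exit() has
  already dropped by the time invoke_softirq() runs, so the old
  in_irq()-only early exit in rcu_read_unlock_special() can never fire
  there.  The following standalone userspace sketch illustrates this;
  bit widths and macro shapes are copied from the hardirq.h of that
  era from memory, so treat the exact constants as illustrative, and
  note that the real IRQ_EXIT_OFFSET may additionally keep preemption
  disabled.

	#include <stdio.h>

	/* Illustrative preempt_count layout (hardirq.h, ~2.6.39/3.0 era). */
	#define PREEMPT_BITS	8
	#define SOFTIRQ_BITS	8
	#define HARDIRQ_BITS	10
	#define SOFTIRQ_SHIFT	PREEMPT_BITS			/* 8 */
	#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* 16 */

	#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
	#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
	#define HARDIRQ_MASK	(((1UL << HARDIRQ_BITS) - 1) << HARDIRQ_SHIFT)

	static unsigned long preempt_count;	/* stand-in for the real counter */

	#define in_irq()		(preempt_count & HARDIRQ_MASK)
	#define in_serving_softirq()	(preempt_count & SOFTIRQ_OFFSET)

	int main(void)
	{
		preempt_count += HARDIRQ_OFFSET;	/* irq_enter() */
		printf("hardirq handler:  in_irq()=%#lx\n", in_irq());

		/* irq_exit(): sub_preempt_count(IRQ_EXIT_OFFSET), simplified */
		preempt_count -= HARDIRQ_OFFSET;
		/* invoke_softirq() runs here: in_irq() is already 0 */
		printf("invoke_softirq(): in_irq()=%#lx in_serving_softirq()=%#lx\n",
		       in_irq(), in_serving_softirq());

		/* the fix: __local_bh_disable(..., SOFTIRQ_OFFSET) */
		preempt_count += SOFTIRQ_OFFSET;
		printf("after fix:        in_serving_softirq()=%#lx\n",
		       in_serving_softirq());
		return 0;
	}
]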

Cure this by also excluding softirqs from the
rcu_read_unlock_special() handler and ensuring the force_irqthreads
ksoftirqd/# wakeup is done from full softirq context.

[ Alternatively, delaying the ->rcu_read_lock_nesting decrement
  until after the special handling would make the thing more robust
  in the face of interrupts as well.  And there is a separate patch
  for that. ]
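[ Editor's note: for reference, a minimal sketch of the shape that
  alternative could take; this mirrors the separate patch mentioned
  above as best remembered, not verbatim, and the INT_MIN "unlock in
  progress" marker is that patch's device:

	void __rcu_read_unlock(void)
	{
		struct task_struct *t = current;

		if (t->rcu_read_lock_nesting != 1) {
			--t->rcu_read_lock_nesting;
		} else {
			/*
			 * Park nesting at a large negative value so an irq
			 * handler's rcu_read_lock()/rcu_read_unlock() pair
			 * cannot observe a zero count while the special
			 * handling below is still in flight.
			 */
			t->rcu_read_lock_nesting = INT_MIN;
			barrier();  /* assign before ->rcu_read_unlock_special load */
			if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
				rcu_read_unlock_special(t);
			barrier();  /* ->rcu_read_unlock_special load before assign */
			t->rcu_read_lock_nesting = 0;
		}
	}
]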

Cc: Thomas Gleixner <tglx@linutronix.de>
Reported-and-tested-by: Ed Tomlinson <edt@aei.ca>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent c5d753a5
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -318,7 +318,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq()) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
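[ Editor's note: the distinction among the kernel's three context
  tests is what makes this one-liner sufficient; paraphrasing the
  include/linux/hardirq.h definitions of that era:

	#define in_irq()		(preempt_count() & HARDIRQ_MASK)   /* hardirq handler */
	#define in_softirq()		(preempt_count() & SOFTIRQ_MASK)   /* serving softirq OR bh disabled */
	#define in_serving_softirq()	(preempt_count() & SOFTIRQ_OFFSET) /* actually serving a softirq */

  in_serving_softirq() is the right check because a mere
  local_bh_disable() section is still ordinary task context where
  blocking is legal; only real softirq execution must take the early
  exit. ]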
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads)
 		__do_softirq();
-	else
+	else {
+		__local_bh_disable((unsigned long)__builtin_return_address(0),
+				SOFTIRQ_OFFSET);
 		wakeup_softirqd();
+		__local_bh_enable(SOFTIRQ_OFFSET);
+	}
 }
 #else
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads)
 		do_softirq();
-	else
+	else {
+		__local_bh_disable((unsigned long)__builtin_return_address(0),
+				SOFTIRQ_OFFSET);
 		wakeup_softirqd();
+		__local_bh_enable(SOFTIRQ_OFFSET);
+	}
 }
 #endif
 
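[ Editor's note: the choice of SOFTIRQ_OFFSET as the disable amount is
  deliberate, and a plain local_bh_disable() would not work here.
  Paraphrasing the hardirq.h of that era from memory:

	#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
	/*
	 * local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET and therefore
	 * leaves in_serving_softirq() false: bh-disabled task context may
	 * still block.  __do_softirq() itself enters with
	 * __local_bh_disable(..., SOFTIRQ_OFFSET), which does set the
	 * "serving" bit.  invoke_softirq() now does the same around
	 * wakeup_softirqd(), so when that ttwu() of ksoftirqd/# performs
	 * an rcu_read_unlock(), rcu_read_unlock_special() sees
	 * in_serving_softirq() and takes the early exit instead of
	 * recursing into ttwu() and deadlocking on pi->lock.
	 */
]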