Commit 9ee01e0f authored by Thomas Gleixner

x86/entry: Clean up idtentry_enter/exit() leftovers

Now that everything is converted to conditional RCU handling, remove
idtentry_enter/exit() and tidy up the conditional functions.

This does not remove rcu_irq_exit_preempt(), to avoid conflicts with the RCU
tree. It will be removed once all of this hits Linus's tree.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20200521202117.473597954@linutronix.de
parent fa95d7dc
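With the plain idtentry_enter()/idtentry_exit() wrappers gone, every C-level handler brackets its body with the conditional pair and feeds the enter function's return value back in as the rcu_exit argument, as the kernel-doc in the diff below spells out. A minimal sketch of that pattern, assuming a hypothetical handler do_example() (real entry stubs are emitted by the DEFINE_IDTENTRY* macros, not written by hand):

/* Sketch only: do_example() is a hypothetical handler for illustration. */
__visible noinstr void do_example(struct pt_regs *regs)
{
	/* True if RCU was not watching and rcu_irq_enter() was invoked */
	bool rcu_exit = idtentry_enter_cond_rcu(regs);

	instrumentation_begin();
	/* instrumentable C handler body runs here */
	instrumentation_end();

	/* Undo the conditional rcu_irq_enter() from above */
	idtentry_exit_cond_rcu(regs, rcu_exit);
}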
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -515,7 +515,6 @@ SYSCALL_DEFINE0(ni_syscall)
  * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
  *			     RCU handling
  * @regs:	Pointer to pt_regs of interrupted context
- * @cond_rcu:	Invoke rcu_irq_enter() only if RCU is not watching
  *
  * Invokes:
  *  - lockdep irqflag state tracking as low level ASM entry disabled
@@ -545,14 +544,14 @@ SYSCALL_DEFINE0(ni_syscall)
  * The return value must be fed into the rcu_exit argument of
  * idtentry_exit_cond_rcu().
  */
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs, bool cond_rcu)
+bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
 		enter_from_user_mode();
 		return false;
 	}
 
-	if (!cond_rcu || !__rcu_is_watching()) {
+	if (!__rcu_is_watching()) {
 		/*
 		 * If RCU is not watching then the same careful
 		 * sequence vs. lockdep and tracing is required
@@ -608,52 +607,44 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 	if (user_mode(regs)) {
 		prepare_exit_to_usermode(regs);
 	} else if (regs->flags & X86_EFLAGS_IF) {
+		/*
+		 * If RCU was not watching on entry this needs to be done
+		 * carefully and needs the same ordering of lockdep/tracing
+		 * and RCU as the return to user mode path.
+		 */
+		if (rcu_exit) {
+			instrumentation_begin();
+			/* Tell the tracer that IRET will enable interrupts */
+			trace_hardirqs_on_prepare();
+			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+			instrumentation_end();
+			rcu_irq_exit();
+			lockdep_hardirqs_on(CALLER_ADDR0);
+			return;
+		}
+
+		instrumentation_begin();
+
 		/* Check kernel preemption, if enabled */
 		if (IS_ENABLED(CONFIG_PREEMPTION)) {
-			/*
-			 * This needs to be done very carefully.
-			 * idtentry_enter() invoked rcu_irq_enter(). This
-			 * needs to be undone before scheduling.
-			 *
-			 * Preemption is disabled inside of RCU idle
-			 * sections. When the task returns from
-			 * preempt_schedule_irq(), RCU is still watching.
-			 *
-			 * rcu_irq_exit_preempt() has additional state
-			 * checking if CONFIG_PROVE_RCU=y
-			 */
 			if (!preempt_count()) {
+				/* Sanity check RCU and thread stack */
+				rcu_irq_exit_check_preempt();
 				if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 					WARN_ON_ONCE(!on_thread_stack());
-				instrumentation_begin();
-				if (rcu_exit)
-					rcu_irq_exit_preempt();
 				if (need_resched())
 					preempt_schedule_irq();
-				/* Covers both tracing and lockdep */
-				trace_hardirqs_on();
-				instrumentation_end();
-				return;
 			}
 		}
-		/*
-		 * If preemption is disabled then this needs to be done
-		 * carefully with respect to RCU. The exception might come
-		 * from a RCU idle section in the idle task due to the fact
-		 * that safe_halt() enables interrupts. So this needs the
-		 * same ordering of lockdep/tracing and RCU as the return
-		 * to user mode path.
-		 */
-		instrumentation_begin();
-		/* Tell the tracer that IRET will enable interrupts */
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+		/* Covers both tracing and lockdep */
+		trace_hardirqs_on();
+
 		instrumentation_end();
-		if (rcu_exit)
-			rcu_irq_exit();
-		lockdep_hardirqs_on(CALLER_ADDR0);
 	} else {
-		/* IRQ flags state is correct already. Just tell RCU. */
+		/*
+		 * IRQ flags state is correct already. Just tell RCU if it
+		 * was not watching on entry.
+		 */
 		if (rcu_exit)
 			rcu_irq_exit();
 	}
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -10,19 +10,9 @@
 void idtentry_enter_user(struct pt_regs *regs);
 void idtentry_exit_user(struct pt_regs *regs);
 
-bool idtentry_enter_cond_rcu(struct pt_regs *regs, bool cond_rcu);
+bool idtentry_enter_cond_rcu(struct pt_regs *regs);
 void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
 
-static __always_inline void idtentry_enter(struct pt_regs *regs)
-{
-	idtentry_enter_cond_rcu(regs, false);
-}
-
-static __always_inline void idtentry_exit(struct pt_regs *regs)
-{
-	idtentry_exit_cond_rcu(regs, true);
-}
-
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
  *		      No error code pushed by hardware