Commit 56450649 authored by Frederic Weisbecker's avatar Frederic Weisbecker Committed by Paul E. McKenney

rcu/context-tracking: Move deferred nocb resched to context tracking

To prepare for migrating the RCU eqs accounting code to context tracking,
split the last-resort deferred nocb resched from rcu_user_enter() and
move it into a separate call from context tracking.
Acked-by: default avatarPaul E. McKenney <paulmck@kernel.org>
Signed-off-by: default avatarFrederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: default avatarPaul E. McKenney <paulmck@kernel.org>
Reviewed-by: default avatarNicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: default avatarNicolas Saenz Julienne <nsaenzju@redhat.com>
parent 95e04f48
...@@ -112,6 +112,12 @@ static inline void rcu_user_enter(void) { } ...@@ -112,6 +112,12 @@ static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { } static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */ #endif /* CONFIG_NO_HZ_FULL */
/*
 * Last-resort deferred reschedule for nohz_full CPUs: without the generic
 * entry code (or KVM's xfer-to-guest work), there is no late rescheduling
 * point on the way to user/guest mode, so a self-IPI irq_work is used
 * instead. Stubbed out when the generic entry paths handle it.
 */
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
static inline void rcu_irq_work_resched(void) { }
#endif
#ifdef CONFIG_RCU_NOCB_CPU #ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void); void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_offload(int cpu);
......
...@@ -177,6 +177,8 @@ static __always_inline void context_tracking_recursion_exit(void) ...@@ -177,6 +177,8 @@ static __always_inline void context_tracking_recursion_exit(void)
*/ */
void noinstr __ct_user_enter(enum ctx_state state) void noinstr __ct_user_enter(enum ctx_state state)
{ {
lockdep_assert_irqs_disabled();
/* Kernel threads aren't supposed to go to userspace */ /* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm); WARN_ON_ONCE(!current->mm);
...@@ -198,6 +200,12 @@ void noinstr __ct_user_enter(enum ctx_state state) ...@@ -198,6 +200,12 @@ void noinstr __ct_user_enter(enum ctx_state state)
vtime_user_enter(current); vtime_user_enter(current);
instrumentation_end(); instrumentation_end();
} }
/*
* Other than generic entry implementation, we may be past the last
* rescheduling opportunity in the entry code. Trigger a self IPI
* that will fire and reschedule once we resume in user/guest mode.
*/
rcu_irq_work_resched();
rcu_user_enter(); rcu_user_enter();
} }
/* /*
......
...@@ -681,7 +681,7 @@ static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) = ...@@ -681,7 +681,7 @@ static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
* last resort is to fire a local irq_work that will trigger a reschedule once IRQs * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
* get re-enabled again. * get re-enabled again.
*/ */
noinstr static void rcu_irq_work_resched(void) noinstr void rcu_irq_work_resched(void)
{ {
struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
...@@ -697,10 +697,7 @@ noinstr static void rcu_irq_work_resched(void) ...@@ -697,10 +697,7 @@ noinstr static void rcu_irq_work_resched(void)
} }
instrumentation_end(); instrumentation_end();
} }
#endif /* #if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK) */
#else
static inline void rcu_irq_work_resched(void) { }
#endif
/** /**
* rcu_user_enter - inform RCU that we are resuming userspace. * rcu_user_enter - inform RCU that we are resuming userspace.
...@@ -715,14 +712,6 @@ static inline void rcu_irq_work_resched(void) { } ...@@ -715,14 +712,6 @@ static inline void rcu_irq_work_resched(void) { }
*/ */
noinstr void rcu_user_enter(void) noinstr void rcu_user_enter(void)
{ {
lockdep_assert_irqs_disabled();
/*
* Other than generic entry implementation, we may be past the last
* rescheduling opportunity in the entry code. Trigger a self IPI
* that will fire and reschedule once we resume in user/guest mode.
*/
rcu_irq_work_resched();
rcu_eqs_enter(true); rcu_eqs_enter(true);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment