Commit 3864caaf authored by Frederic Weisbecker, committed by Paul E. McKenney

rcu/context-tracking: Remove rcu_irq_enter/exit()

Now rcu_irq_enter/exit() are unnecessary middle calls between
ct_irq_enter/exit() and rcu_nmi_enter/exit(). Take this opportunity
to remove the former functions and move the comments above them to the
new entrypoints.
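
As a quick before/after sketch of the irq-entry chain (this assumes
ct_nmi_enter/exit() still forwards to rcu_nmi_enter/exit() at this
point in the series; only the names shown in the diff below are
confirmed by this commit):

	/* Before this commit: */
	ct_irq_enter()
	  -> rcu_irq_enter()		/* the removed middle call */
	       -> rcu_nmi_enter()

	/* After this commit: */
	ct_irq_enter()			/* lockdep assertion moved here */
	  -> ct_nmi_enter()		/* assumed to wrap rcu_nmi_enter() */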
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
parent 493c1822
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -78,10 +78,6 @@ static inline void rcu_cpu_stall_reset(void) { }
 static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
 static inline void rcu_idle_enter(void) { }
 static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -47,10 +47,6 @@ void cond_synchronize_rcu(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
 bool rcu_is_idle_cpu(int cpu);
 
 #ifdef CONFIG_PROVE_RCU
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -36,24 +36,87 @@ void ct_idle_exit(void)
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
 
+/**
+ * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
+ *
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur. The caller must have disabled interrupts.
+ *
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to user mode!
+ * This code assumes that the idle loop never does upcalls to user mode.
+ * If your architecture's idle loop does do upcalls to user mode (or does
+ * anything else that results in unbalanced calls to the irq_enter() and
+ * irq_exit() functions), RCU will give you what you deserve, good and hard.
+ * But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ *
+ * If you add or remove a call to ct_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
+ */
 noinstr void ct_irq_enter(void)
 {
-	rcu_irq_enter();
+	lockdep_assert_irqs_disabled();
+	ct_nmi_enter();
 }
 
+/**
+ * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
+ *
+ * Exit from an interrupt handler, which might possibly result in entering
+ * idle mode, in other words, leaving the mode in which read-side critical
+ * sections can occur. The caller must have disabled interrupts.
+ *
+ * This code assumes that the idle loop never does anything that might
+ * result in unbalanced calls to irq_enter() and irq_exit(). If your
+ * architecture's idle loop violates this assumption, RCU will give you what
+ * you deserve, good and hard. But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ *
+ * If you add or remove a call to ct_irq_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
+ */
 noinstr void ct_irq_exit(void)
 {
-	rcu_irq_exit();
+	lockdep_assert_irqs_disabled();
+	ct_nmi_exit();
 }
 
+/*
+ * Wrapper for ct_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to ct_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
 void ct_irq_enter_irqson(void)
 {
-	rcu_irq_enter_irqson();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ct_irq_enter();
+	local_irq_restore(flags);
 }
 
+/*
+ * Wrapper for ct_irq_exit() where interrupts are enabled.
+ *
+ * If you add or remove a call to ct_irq_exit_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
 void ct_irq_exit_irqson(void)
 {
-	rcu_irq_exit_irqson();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ct_irq_exit();
+	local_irq_restore(flags);
 }
 
 noinstr void ct_nmi_enter(void)
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -789,31 +789,6 @@ noinstr void rcu_nmi_exit(void)
 	rcu_dynticks_task_enter();
 }
 
-/**
- * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
- *
- * Exit from an interrupt handler, which might possibly result in entering
- * idle mode, in other words, leaving the mode in which read-side critical
- * sections can occur. The caller must have disabled interrupts.
- *
- * This code assumes that the idle loop never does anything that might
- * result in unbalanced calls to irq_enter() and irq_exit(). If your
- * architecture's idle loop violates this assumption, RCU will give you what
- * you deserve, good and hard. But very infrequently and irreproducibly.
- *
- * Use things like work queues to work around this limitation.
- *
- * You have been warned.
- *
- * If you add or remove a call to rcu_irq_exit(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void noinstr rcu_irq_exit(void)
-{
-	lockdep_assert_irqs_disabled();
-	rcu_nmi_exit();
-}
-
 #ifdef CONFIG_PROVE_RCU
 /**
  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
@@ -832,21 +807,6 @@ void rcu_irq_exit_check_preempt(void)
 }
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
-/*
- * Wrapper for rcu_irq_exit() where interrupts are enabled.
- *
- * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_exit_irqson(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	rcu_irq_exit();
-	local_irq_restore(flags);
-}
-
 /*
  * Exit an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
@@ -1041,49 +1001,6 @@ noinstr void rcu_nmi_enter(void)
 	barrier();
 }
 
-/**
- * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
- *
- * Enter an interrupt handler, which might possibly result in exiting
- * idle mode, in other words, entering the mode in which read-side critical
- * sections can occur. The caller must have disabled interrupts.
- *
- * Note that the Linux kernel is fully capable of entering an interrupt
- * handler that it never exits, for example when doing upcalls to user mode!
- * This code assumes that the idle loop never does upcalls to user mode.
- * If your architecture's idle loop does do upcalls to user mode (or does
- * anything else that results in unbalanced calls to the irq_enter() and
- * irq_exit() functions), RCU will give you what you deserve, good and hard.
- * But very infrequently and irreproducibly.
- *
- * Use things like work queues to work around this limitation.
- *
- * You have been warned.
- *
- * If you add or remove a call to rcu_irq_enter(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-noinstr void rcu_irq_enter(void)
-{
-	lockdep_assert_irqs_disabled();
-	rcu_nmi_enter();
-}
-
-/*
- * Wrapper for rcu_irq_enter() where interrupts are enabled.
- *
- * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_enter_irqson(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	rcu_irq_enter();
-	local_irq_restore(flags);
-}
-
 /*
  * Check to see if any future non-offloaded RCU-related work will need
  * to be done by the current CPU, even if none need be done immediately,
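
For callers of the removed functions, the conversion is mechanical:
with interrupts disabled, use ct_irq_enter()/ct_irq_exit() where
rcu_irq_enter()/rcu_irq_exit() were used; otherwise use the _irqson()
wrappers. A minimal sketch (the handler below is hypothetical and
illustrative only, not part of this commit):

	/* Hypothetical irq entry path using the new entrypoints. */
	static void example_handle_irq(void)
	{
		ct_irq_enter();		/* was rcu_irq_enter(); irqs must be off */
		/* ... dispatch and handle the interrupt ... */
		ct_irq_exit();		/* was rcu_irq_exit() */
	}

Code that runs with interrupts enabled calls ct_irq_enter_irqson() and
ct_irq_exit_irqson() instead, matching the removed rcu_irq_*_irqson()
wrappers.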