Commit a86baa69 authored by Joel Fernandes (Google), committed by Paul E. McKenney

rcu: Remove special bit at the bottom of the ->dynticks counter

Commit b8c17e66 ("rcu: Maintain special bits at bottom of ->dynticks
counter") reserved a bit at the bottom of the ->dynticks counter to defer
flushing of TLBs, but this facility never has been used.  This commit
therefore removes this capability along with the rcu_eqs_special_set()
function used to trigger it.

Link: https://lore.kernel.org/linux-doc/CALCETrWNPOOdTrFabTDd=H7+wc6xJ9rJceg6OL1S0rTV5pfSsA@mail.gmail.com/
Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: "Joel Fernandes (Google)" <joel@joelfernandes.org>
[ paulmck: Forward-port to v5.13-rc1. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent dc87740c
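To make the diff below easier to follow, here is a minimal sketch of the encoding that remains once the special bit is gone: ->dynticks is incremented by 1 on every idle entry and exit, so an odd value means RCU is watching and an even value means the CPU is in an extended quiescent state (EQS). This is a stand-alone user-space analogue assuming C11 atomics in place of the kernel's arch_atomic_* API; the function names mirror, but are not, the kernel's.

	/*
	 * Sketch only: a user-space analogue of the post-commit ->dynticks
	 * protocol, not kernel code.  The counter starts at 1 (odd: RCU
	 * watching, matching ATOMIC_INIT(1) in the diff) and is incremented
	 * by 1 on each EQS entry/exit, so the low bit alone distinguishes
	 * "watching" (odd) from "extended quiescent state" (even).
	 */
	#include <assert.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int dynticks = 1;		/* odd: RCU watching */

	static bool in_eqs(int snap)
	{
		return !(snap & 0x1);		/* even counter <=> EQS */
	}

	static void eqs_enter(void)
	{
		int seq = atomic_fetch_add(&dynticks, 1) + 1;

		assert(!(seq & 0x1));		/* must now be even: in EQS */
	}

	static void eqs_exit(void)
	{
		int seq = atomic_fetch_add(&dynticks, 1) + 1;

		assert(seq & 0x1);		/* must now be odd: watching */
	}

	int main(void)
	{
		assert(!in_eqs(atomic_load(&dynticks)));	/* watching at start */
		eqs_enter();
		assert(in_eqs(atomic_load(&dynticks)));		/* idle: in EQS */
		eqs_exit();
		assert(!in_eqs(atomic_load(&dynticks)));	/* watching again */
		return 0;
	}

With the special bit, each transition instead added RCU_DYNTICK_CTRL_CTR (2), keeping bit 0 free as a flag; removing the flag lets the counter use a plain increment, as the hunks below show.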
@@ -14,9 +14,6 @@
 
 #include <asm/param.h> /* for HZ */
 
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
-
 unsigned long get_state_synchronize_rcu(void);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
@@ -74,17 +74,10 @@
 
 /* Data structures. */
 
-/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control.  Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
 	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+	.dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_RCU_NOCB_CPU
 	.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
 #endif
@@ -266,7 +259,6 @@ void rcu_softirq_qs(void)
  */
 static noinstr void rcu_dynticks_eqs_enter(void)
 {
-	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	int seq;
 
 	/*
@@ -275,13 +267,9 @@ static noinstr void rcu_dynticks_eqs_enter(void)
 	 * next idle sojourn.
 	 */
 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+	seq = arch_atomic_inc_return(&this_cpu_ptr(&rcu_data)->dynticks);
 	// RCU is no longer watching.  Better be in extended quiescent state!
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (seq & RCU_DYNTICK_CTRL_CTR));
-	/* Better not have special action (TLB flush) pending! */
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (seq & RCU_DYNTICK_CTRL_MASK));
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
 }
 
 /*
@@ -291,7 +279,6 @@ static noinstr void rcu_dynticks_eqs_enter(void)
  */
 static noinstr void rcu_dynticks_eqs_exit(void)
 {
-	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	int seq;
 
 	/*
@@ -299,15 +286,10 @@ static noinstr void rcu_dynticks_eqs_exit(void)
 	 * and we also must force ordering with the next RCU read-side
 	 * critical section.
 	 */
-	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+	seq = arch_atomic_inc_return(&this_cpu_ptr(&rcu_data)->dynticks);
 	// RCU is now watching.  Better not be in an extended quiescent state!
 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     !(seq & RCU_DYNTICK_CTRL_CTR));
-	if (seq & RCU_DYNTICK_CTRL_MASK) {
-		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
-		smp_mb__after_atomic(); /* _exit after clearing mask. */
-	}
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
 }
 
 /*
@@ -324,9 +306,9 @@ static void rcu_dynticks_eqs_online(void)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+	if (atomic_read(&rdp->dynticks) & 0x1)
 		return;
-	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+	atomic_inc(&rdp->dynticks);
 }
 
 /*
@@ -336,9 +318,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-
-	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+	return !(arch_atomic_read(&this_cpu_ptr(&rcu_data)->dynticks) & 0x1);
 }
 
 /*
@@ -347,9 +327,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
-	int snap = atomic_add_return(0, &rdp->dynticks);
-
-	return snap & ~RCU_DYNTICK_CTRL_MASK;
+	return atomic_add_return(0, &rdp->dynticks);
 }
 
 /*
@@ -358,7 +336,7 @@ static int rcu_dynticks_snap(struct rcu_data *rdp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-	return !(snap & RCU_DYNTICK_CTRL_CTR);
+	return !(snap & 0x1);
 }
 
 /* Return true if the specified CPU is currently idle from an RCU viewpoint. */
@@ -389,8 +367,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 	int snap;
 
 	// If not quiescent, force back to earlier extended quiescent state.
-	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
-					       RCU_DYNTICK_CTRL_CTR);
+	snap = atomic_read(&rdp->dynticks) & ~0x1;
 	smp_rmb(); // Order ->dynticks and *vp reads.
 	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure;
@@ -398,32 +375,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 	smp_rmb(); // Order *vp read and ->dynticks re-read.
 
 	// If still in the same extended quiescent state, we are good!
-	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
-}
-
-/*
- * Set the special (bottom) bit of the specified CPU so that it
- * will take special action (such as flushing its TLB) on the
- * next exit from an extended quiescent state.  Returns true if
- * the bit was successfully set, or false if the CPU was not in
- * an extended quiescent state.
- */
-bool rcu_eqs_special_set(int cpu)
-{
-	int old;
-	int new;
-	int new_old;
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-	new_old = atomic_read(&rdp->dynticks);
-	do {
-		old = new_old;
-		if (old & RCU_DYNTICK_CTRL_CTR)
-			return false;
-		new = old | RCU_DYNTICK_CTRL_MASK;
-		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
-	} while (new_old != old);
-	return true;
+	return snap == atomic_read(&rdp->dynticks);
 }
 
 /*
@@ -442,10 +394,9 @@ notrace void rcu_momentary_dyntick_idle(void)
 	int special;
 
 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-				    &this_cpu_ptr(&rcu_data)->dynticks);
+	special = atomic_add_return(2, &this_cpu_ptr(&rcu_data)->dynticks);
 	/* It is illegal to call this from idle state. */
-	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+	WARN_ON_ONCE(!(special & 0x1));
 	rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
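One subtlety in the final hunk deserves a note: rcu_momentary_dyntick_idle() adds 2 (previously 2 * RCU_DYNTICK_CTRL_CTR) rather than 1. The sketch below, again a user-space analogue assuming C11 atomics rather than kernel code, shows the effect: the counter value changes, so snapshot comparisons see that a quiescent state has passed, while the low bit stays set, so the CPU never appears to enter an EQS.

	/*
	 * Sketch only: why a momentary quiescent state adds 2 rather than 1.
	 * Adding 2 changes the counter value (snapshot comparisons on other
	 * CPUs now differ, so a quiescent state is reported) while preserving
	 * the low bit (this CPU is "watching" throughout, never in an EQS).
	 */
	#include <assert.h>
	#include <stdatomic.h>

	static atomic_int dynticks = 1;		/* odd: RCU watching */

	static void momentary_dyntick_idle(void)
	{
		int special = atomic_fetch_add(&dynticks, 2) + 2;

		assert(special & 0x1);		/* illegal to call this from idle */
	}

	int main(void)
	{
		int snap = atomic_load(&dynticks);

		momentary_dyntick_idle();
		assert(atomic_load(&dynticks) != snap);		/* QS now visible */
		assert(atomic_load(&dynticks) & 0x1);		/* still watching */
		return 0;
	}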