Commit aa40c138 authored by Paul E. McKenney

rcu: Report QS for outermost PREEMPT=n rcu_read_unlock() for strict GPs

The CONFIG_PREEMPT=n instance of rcu_read_unlock() is even more
aggressive than that of CONFIG_PREEMPT=y in deferring the reporting of
quiescent states to the RCU core.  This is just what is wanted in normal
use because it reduces overhead, but the resulting delay is not what
is wanted for kernels built with CONFIG_RCU_STRICT_GRACE_PERIOD=y.
This commit therefore adds an rcu_read_unlock_strict() function that
checks for exceptional conditions, and reports the newly started
quiescent state if it is safe to do so, also doing a spin-delay if
requested via rcutree.rcu_unlock_delay.  This commit also adds a call
to rcu_read_unlock_strict() from the CONFIG_PREEMPT=n instance of
__rcu_read_unlock().
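
For illustration, a minimal sketch (an editorial paraphrase of the two
hunks below, not additional patch text) of the resulting CONFIG_PREEMPT=n
unlock path:

	static inline void __rcu_read_unlock(void)
	{
		preempt_enable();		/* End of the read-side critical section. */
		rcu_read_unlock_strict();	/* Report the new QS now if it is safe to do so. */
	}

The quiescent state is thus reported from the outermost rcu_read_unlock()
itself rather than waiting for the next scheduling-clock tick or context
switch.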

[ paulmck: Fixed bug located by kernel test robot <lkp@intel.com> ]
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent a657f261
@@ -55,6 +55,12 @@ void __rcu_read_unlock(void);
#else /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif
static inline void __rcu_read_lock(void)
{
	preempt_disable();
@@ -63,6 +69,7 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
	preempt_enable();
	rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
...
@@ -178,6 +178,12 @@ module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif
/*
 * This rcu parameter is runtime-read-only. It reflects
 * a minimum allowed number of objects which can be cached
...
@@ -430,12 +430,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
	return !list_empty(&rnp->blkd_tasks);
}
// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif
/*
 * Report deferred quiescent states. The deferral time can
 * be quite short, for example, in the case of the call from
@@ -784,6 +778,24 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
#else /* #ifdef CONFIG_PREEMPT_RCU */
/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
	    irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
		return;
	rdp = this_cpu_ptr(&rcu_data);
	rcu_report_qs_rdp(rdp->cpu, rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
/*
 * Tell them what RCU they are running.
 */
...
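
As a purely illustrative usage sketch (hypothetical test module, not part
of this commit): on a CONFIG_PREEMPT=n kernel built with
CONFIG_RCU_STRICT_GRACE_PERIOD=y and booted with, say,
rcutree.rcu_unlock_delay=100, an outermost rcu_read_unlock() executed with
interrupts enabled and no other preemption disabling in force spins for
roughly 100 microseconds and immediately reports the CPU's quiescent state:

	#include <linux/module.h>
	#include <linux/rcupdate.h>

	static int __init strict_unlock_demo_init(void)
	{
		rcu_read_lock();	/* Disables preemption under CONFIG_PREEMPT=n. */
		/* Read-side critical section would go here. */
		rcu_read_unlock();	/* Outermost unlock: rcu_read_unlock_strict() reports the QS. */
		synchronize_rcu();	/* This CPU's QS is already reported; only other CPUs are waited on. */
		return 0;
	}
	module_init(strict_unlock_demo_init);

	MODULE_LICENSE("GPL");

The function and module names above are made up for the example; the new
rcu_read_unlock_strict() is reachable from modules precisely because of
the EXPORT_SYMBOL_GPL() in the final hunk.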