Commit e29a4915 authored by Frederic Weisbecker, committed by Paul E. McKenney

srcu: Debug NMI safety even on archs that don't require it

Currently the NMI safety debugging is only performed on architectures
that don't support NMI-safe this_cpu_inc().

Reorder the code so that other architectures like x86 also detect bad
uses.

[ paulmck: Apply kernel test robot, Stephen Rothwell, and Zqiang feedback. ]
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent ae3c0706
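
For illustration only (not part of the patch): the kind of inconsistent usage this debugging catches, now on every architecture including x86, is mixing plain and NMI-safe readers on the same srcu_struct. The srcu_struct my_srcu and the two functions below are hypothetical; with CONFIG_PROVE_RCU enabled, the second reader would trip the WARN_ONCE() in srcu_check_nmi_safety().

	/*
	 * Hypothetical sketch: mixing NMI-unsafe and NMI-safe readers on
	 * the same srcu_struct is the "bad use" that srcu_check_nmi_safety()
	 * flags, even on architectures with NMI-safe this_cpu_inc().
	 */
	#include <linux/srcu.h>

	DEFINE_SRCU(my_srcu);	/* hypothetical example srcu_struct */

	static void process_context_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);	/* marks my_srcu NMI-unsafe */
		/* ... read-side critical section ... */
		srcu_read_unlock(&my_srcu, idx);
	}

	static void nmi_context_reader(void)
	{
		int idx;

		/*
		 * Conflicting use: marks my_srcu NMI-safe, so CONFIG_PROVE_RCU
		 * now warns "CPU %d old state %d new state %d" on all arches.
		 */
		idx = srcu_read_lock_nmisafe(&my_srcu);
		/* ... read-side critical section usable from NMI ... */
		srcu_read_unlock_nmisafe(&my_srcu, idx);
	}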
@@ -65,14 +65,14 @@ unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 
 #ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
 #else
-static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
+static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
 {
 	return __srcu_read_lock(ssp);
 }
-static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
+static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
 {
 	__srcu_read_unlock(ssp, idx);
 }
@@ -118,6 +118,18 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+#define SRCU_NMI_UNKNOWN	0x0
+#define SRCU_NMI_UNSAFE		0x1
+#define SRCU_NMI_SAFE		0x2
+
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
+void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
+#else
+static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
+					 bool nmi_safe) { }
+#endif
+
 /**
  * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
  * @p: the pointer to fetch and protect for later dereferencing
@@ -175,6 +187,7 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
 
+	srcu_check_nmi_safety(ssp, false);
 	retval = __srcu_read_lock(ssp);
 	rcu_lock_acquire(&(ssp)->dep_map);
 	return retval;
@@ -191,10 +204,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
 
-	if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
-		retval = __srcu_read_lock_nmisafe(ssp, true);
-	else
-		retval = __srcu_read_lock(ssp);
+	srcu_check_nmi_safety(ssp, true);
+	retval = __srcu_read_lock_nmisafe(ssp);
 	rcu_lock_acquire(&(ssp)->dep_map);
 	return retval;
 }
@@ -205,6 +216,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
 
+	srcu_check_nmi_safety(ssp, false);
 	retval = __srcu_read_lock(ssp);
 	return retval;
 }
@@ -220,6 +232,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
 	__releases(ssp)
 {
 	WARN_ON_ONCE(idx & ~0x1);
+	srcu_check_nmi_safety(ssp, false);
 	rcu_lock_release(&(ssp)->dep_map);
 	__srcu_read_unlock(ssp, idx);
 }
@@ -235,17 +248,16 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
 	__releases(ssp)
 {
 	WARN_ON_ONCE(idx & ~0x1);
+	srcu_check_nmi_safety(ssp, true);
 	rcu_lock_release(&(ssp)->dep_map);
-	if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
-		__srcu_read_unlock_nmisafe(ssp, idx, true);
-	else
-		__srcu_read_unlock(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx);
 }
 
 /* Used by tracing, cannot be traced and cannot call lockdep. */
 static inline notrace void
 srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 {
+	srcu_check_nmi_safety(ssp, false);
 	__srcu_read_unlock(ssp, idx);
 }
...
@@ -43,10 +43,6 @@ struct srcu_data {
 	struct srcu_struct *ssp;
 };
 
-#define SRCU_NMI_UNKNOWN	0x0
-#define SRCU_NMI_NMI_UNSAFE	0x1
-#define SRCU_NMI_NMI_SAFE	0x2
-
 /*
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
...
@@ -631,17 +631,16 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
+#ifdef CONFIG_PROVE_RCU
 /*
  * Check for consistent NMI safety.
  */
-static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
+void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
 {
 	int nmi_safe_mask = 1 << nmi_safe;
 	int old_nmi_safe_mask;
 	struct srcu_data *sdp;
 
-	if (!IS_ENABLED(CONFIG_PROVE_RCU))
-		return;
-
 	/* NMI-unsafe use in NMI is a bad sign */
 	WARN_ON_ONCE(!nmi_safe && in_nmi());
 	sdp = raw_cpu_ptr(ssp->sda);
@@ -652,6 +651,8 @@ static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
 	}
 	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
 }
+EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
+#endif /* CONFIG_PROVE_RCU */
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
@@ -665,7 +666,6 @@ int __srcu_read_lock(struct srcu_struct *ssp)
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
-	srcu_check_nmi_safety(ssp, false);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -679,7 +679,6 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
 	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
-	srcu_check_nmi_safety(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -690,7 +689,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
  * srcu_struct, but in an NMI-safe manner using RMW atomics.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
 {
 	int idx;
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
@@ -698,8 +697,6 @@ int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	atomic_long_inc(&sdp->srcu_lock_count[idx]);
 	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
-	if (chknmisafe)
-		srcu_check_nmi_safety(ssp, true);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -709,14 +706,12 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  */
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
 {
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
 
 	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
 	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
-	if (chknmisafe)
-		srcu_check_nmi_safety(ssp, true);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
@@ -1163,7 +1158,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	 * SRCU read-side critical section so that the grace-period
 	 * sequence number cannot wrap around in the meantime.
 	 */
-	idx = __srcu_read_lock_nmisafe(ssp, false);
+	idx = __srcu_read_lock_nmisafe(ssp);
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_CALL)
 		sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1196,7 +1191,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
-	__srcu_read_unlock_nmisafe(ssp, idx, false);
+	__srcu_read_unlock_nmisafe(ssp, idx);
 	return s;
 }
@@ -1500,13 +1495,13 @@ void srcu_barrier(struct srcu_struct *ssp)
 	/* Initial count prevents reaching zero until all CBs are posted. */
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
-	idx = __srcu_read_lock_nmisafe(ssp, false);
+	idx = __srcu_read_lock_nmisafe(ssp);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
-	__srcu_read_unlock_nmisafe(ssp, idx, false);
+	__srcu_read_unlock_nmisafe(ssp, idx);
 
 	/* Remove the initial count, at which point reaching zero can happen. */
 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
...