Commit 78e4bc34 authored by Paul E. McKenney

rcu: Fix and comment ordering around wait_event()

It is all too easy to forget that wait_event() does not necessarily
imply a full memory barrier.  The case where it does not is when the
condition transitions to true just as wait_event() starts execution,
so that wait_event() sees the condition already true and returns
immediately, without sleeping and without executing any barrier.
This is actually a feature: The standard use of wait_event() involves
locking, in which case the locks provide the needed ordering (you hold a
lock across the wake_up() and acquire that same lock after wait_event()
returns).
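To make that locked pattern concrete, here is a minimal sketch; the
names my_lock, my_wq, data_ready, my_waker, and my_waiter are
hypothetical, not taken from this commit.  The lock, rather than
wait_event() itself, supplies the ordering:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DEFINE_SPINLOCK(my_lock);
	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static bool data_ready;

	static void my_waker(void)
	{
		spin_lock(&my_lock);
		data_ready = true;	/* Publish under the lock. */
		wake_up(&my_wq);	/* wake_up() while holding the lock. */
		spin_unlock(&my_lock);
	}

	static void my_waiter(void)
	{
		wait_event(my_wq, data_ready);
		spin_lock(&my_lock);	/* Acquiring the waker's lock orders
					 * its whole critical section before
					 * everything that follows. */
		/* ... safely access data published by my_waker() ... */
		spin_unlock(&my_lock);
	}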

Given that I did forget that wait_event() does not necessarily imply a
full memory barrier in one case, this commit fixes that case.  This commit
also adds comments calling out the placement of existing memory barriers
relied on by wait_event() calls.
Signed-off-by: default avatarPaul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 6193c76a
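Where no such lock is available, as at several of the call sites
changed below, explicit barriers must pair across the
wake_up()/wait_event() boundary.  A minimal sketch of that lockless
pattern, again with hypothetical names (my_wq, my_flag):

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static int my_flag;

	static void my_waker(void)
	{
		smp_mb();	/* Order prior updates before the flag store. */
		ACCESS_ONCE(my_flag) = 1;
		wake_up(&my_wq);
	}

	static void my_waiter(void)
	{
		wait_event(my_wq, ACCESS_ONCE(my_flag));
		smp_mb();	/* Even if the flag was already set when
				 * wait_event() began, this orders the flag
				 * load before all later accesses. */
		/* ... later accesses now see my_waker()'s prior updates ... */
	}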
@@ -1578,6 +1578,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 {
 	long myid = (long)arg;
 	bool lastphase = 0;
+	bool newphase;
 	struct rcu_head rcu;
 
 	init_rcu_head_on_stack(&rcu);
@@ -1585,10 +1586,11 @@ static int rcu_torture_barrier_cbs(void *arg)
 	set_user_nice(current, 19);
 	do {
 		wait_event(barrier_cbs_wq[myid],
-			   barrier_phase != lastphase ||
+			   (newphase =
+			    ACCESS_ONCE(barrier_phase)) != lastphase ||
 			   kthread_should_stop() ||
 			   fullstop != FULLSTOP_DONTSTOP);
-		lastphase = barrier_phase;
+		lastphase = newphase;
 		smp_mb(); /* ensure barrier_phase load before ->call(). */
 		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
 			break;
@@ -1625,7 +1627,7 @@ static int rcu_torture_barrier(void *arg)
 		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
 			break;
 		n_barrier_attempts++;
-		cur_ops->cb_barrier();
+		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
 			n_rcu_torture_barrier_error++;
 			WARN_ON_ONCE(1);
@@ -1533,6 +1533,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			__note_gp_changes(rsp, rnp, rdp);
+		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched();
@@ -1577,6 +1578,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			wait_event_interruptible(rsp->gp_wq,
 						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
+			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
 			cond_resched();
@@ -1606,6 +1608,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
+			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
 			if (!ACCESS_ONCE(rnp->qsmask) &&
 			    !rcu_preempt_blocked_readers_cgp(rnp))
@@ -779,8 +779,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		}
 		if (rnp->parent == NULL) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			if (wake)
+			if (wake) {
+				smp_mb(); /* EGP done before wake_up(). */
 				wake_up(&sync_rcu_preempt_exp_wq);
+			}
 			break;
 		}
 		mask = rnp->grpmask;
@@ -1852,6 +1854,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
 	/* Wait for callbacks from earlier instance to complete. */
 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
 
 	/*
 	 * Prevent premature wakeup: ensure that all increments happen
@@ -2250,6 +2253,7 @@ static int rcu_nocb_kthread(void *arg)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+			/* Memory barrier provided by xchg() below. */
 		} else if (firsttime) {
 			firsttime = 0;
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
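The final comment above refers to ordering supplied by code outside
this hunk.  As a hedged sketch of that idea (hypothetical names, not
the actual rcu_nocb_kthread() code): xchg() implies a full memory
barrier, so a dequeue via xchg() immediately after
wait_event_interruptible() doubles as the needed barrier:

	#include <linux/rcupdate.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static struct rcu_head *my_head;

	static void my_consumer(void)
	{
		struct rcu_head *list;

		if (wait_event_interruptible(my_wq, ACCESS_ONCE(my_head)))
			return;			/* Interrupted by a signal. */
		list = xchg(&my_head, NULL);	/* Full barrier: orders the
						 * condition load above before
						 * all later list accesses. */
		/* ... invoke the callbacks queued on "list" ... */
	}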