Commit d85b62f1 authored by Paul E. McKenney

srcu: Force full grace-period ordering

If a process invokes synchronize_srcu(), is delayed just the right amount
of time, and thus does not sleep when waiting for the grace period to
complete, there is no ordering between the end of the grace period and
the code following the synchronize_srcu().  Similarly, there can be a
lack of ordering between the end of the SRCU grace period and callback
invocation.

This commit adds the necessary ordering.
Reported-by: Lance Roy <ldr709@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Further smp_mb() adjustment per email with Lance Roy. ]
parent f2c46896
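
As an illustration of the ordering described above (a sketch, not part of this commit; the srcu_struct, pointer, and function names are made up), the update-side pattern that relies on synchronize_srcu() acting as a full memory barrier looks roughly like this:

	#include <linux/srcu.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
	};

	static struct foo __rcu *shared_ptr;	/* hypothetical SRCU-protected pointer */
	DEFINE_SRCU(my_srcu);			/* hypothetical srcu_struct */

	static void update_foo(struct foo *newp)
	{
		struct foo *oldp;

		oldp = rcu_dereference_protected(shared_ptr, 1);
		rcu_assign_pointer(shared_ptr, newp);

		/* Wait for all pre-existing SRCU readers to finish. */
		synchronize_srcu(&my_srcu);

		/*
		 * The caller's accesses after synchronize_srcu(), such as this
		 * kfree(), must be ordered after the end of the grace period.
		 * Before this commit, that ordering could be missing when
		 * synchronize_srcu() happened to return without sleeping.
		 */
		kfree(oldp);
	}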
include/linux/rcupdate.h
@@ -1161,5 +1161,17 @@ do { \
 	ftrace_dump(oops_dump_mode); \
 } while (0)
 
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifdef CONFIG_PPC
+#define smp_mb__after_unlock_lock() smp_mb()  /* Full ordering for lock. */
+#else /* #ifdef CONFIG_PPC */
+#define smp_mb__after_unlock_lock() do { } while (0)
+#endif /* #else #ifdef CONFIG_PPC */
+
 #endif /* __LINUX_RCUPDATE_H */
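
For context (a sketch, not part of the diff): smp_mb__after_unlock_lock() is meant to be placed immediately after acquiring a lock. On PowerPC it expands to smp_mb(), upgrading the preceding unlock plus this lock to a full memory barrier; on the other architectures the unlock/lock combination already provides that ordering, so it expands to nothing. A hypothetical single-CPU example (x, y, lock1, and lock2 are made-up names):

	static DEFINE_SPINLOCK(lock1);	/* hypothetical lock */
	static DEFINE_SPINLOCK(lock2);	/* hypothetical lock */
	static int x, y;		/* hypothetical shared variables */

	static void example(void)	/* assume lock1 is already held */
	{
		int r1;

		WRITE_ONCE(x, 1);
		spin_unlock(&lock1);
		spin_lock(&lock2);
		smp_mb__after_unlock_lock();
		r1 = READ_ONCE(y);
		/*
		 * Because the unlock and the lock execute on the same CPU, the
		 * barrier makes the store to x and the load from y fully ordered
		 * as seen by all CPUs, even though two different locks are used.
		 */
		spin_unlock(&lock2);
		(void)r1;
	}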
kernel/rcu/srcu.c
@@ -358,6 +358,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
 	head->next = NULL;
 	head->func = func;
 	spin_lock_irqsave(&sp->queue_lock, flags);
+	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
 	rcu_batch_queue(&sp->batch_queue, head);
 	if (!sp->running) {
 		sp->running = true;
@@ -391,6 +392,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 	head->next = NULL;
 	head->func = wakeme_after_rcu;
 	spin_lock_irq(&sp->queue_lock);
+	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
 	if (!sp->running) {
 		/* steal the processing owner */
 		sp->running = true;
@@ -410,8 +412,11 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 		spin_unlock_irq(&sp->queue_lock);
 	}
 
-	if (!done)
+	if (!done) {
 		wait_for_completion(&rcu.completion);
+		smp_mb(); /* Caller's later accesses after GP. */
+	}
+
 }
 
 /**
@@ -579,7 +584,8 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
 /*
  * Invoke a limited number of SRCU callbacks that have passed through
  * their grace period.  If there are more to do, SRCU will reschedule
- * the workqueue.
+ * the workqueue.  Note that needed memory barriers have been executed
+ * in this task's context by srcu_readers_active_idx_check().
  */
 static void srcu_invoke_callbacks(struct srcu_struct *sp)
 {
kernel/rcu/tree.h
@@ -687,18 +687,6 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 }
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-/*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifdef CONFIG_PPC
-#define smp_mb__after_unlock_lock() smp_mb()  /* Full ordering for lock. */
-#else /* #ifdef CONFIG_PPC */
-#define smp_mb__after_unlock_lock() do { } while (0)
-#endif /* #else #ifdef CONFIG_PPC */
-
 /*
  * Wrappers for the rcu_node::lock acquire and release.
  *