Commit 4f525a52 authored by Paul E. McKenney

rcu: Apply rcu_seq operations to _rcu_barrier()

The rcu_seq operations were open-coded in _rcu_barrier(), so this commit
replaces the open-coding with the shiny new rcu_seq operations.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 29fd9309
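
For context, the rcu_seq operations referred to above implement a small sequence-counter protocol: the counter is odd while an operation (here, an rcu_barrier()) is in flight and even when idle, a snapshot taken up front encodes "the counter value at which my work is guaranteed to have been done for me", and a done check compares the counter against that snapshot. The sketch below models the protocol in plain user-space C so the arithmetic in the diff is easy to follow; the function names mirror the kernel helpers, but the bodies are an illustration only (the in-tree versions add memory barriers, WARN_ON_ONCE() sanity checks, and wrap-safe comparison via ULONG_CMP_GE()).

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: the in-kernel helpers add barriers and sanity checks. */

static void rcu_seq_start(unsigned long *sp)
{
	*sp += 1;		/* odd: an operation is now in flight */
}

static void rcu_seq_end(unsigned long *sp)
{
	*sp += 1;		/* even again: the operation has completed */
}

static unsigned long rcu_seq_snap(const unsigned long *sp)
{
	/* Round up so that, if the counter is even, one further start/end
	 * pair satisfies the caller; if odd, the in-flight operation plus
	 * one further pair must complete. */
	return (*sp + 3) & ~0x1UL;
}

static bool rcu_seq_done(const unsigned long *sp, unsigned long s)
{
	return *sp >= s;	/* kernel uses ULONG_CMP_GE() to survive wrap */
}

int main(void)
{
	unsigned long seq = 0;
	unsigned long s = rcu_seq_snap(&seq);		/* s == 2 */

	printf("done before any barrier? %d\n", rcu_seq_done(&seq, s)); /* 0 */
	rcu_seq_start(&seq);				/* seq == 1 (in flight) */
	rcu_seq_end(&seq);				/* seq == 2 (complete) */
	printf("done after one barrier?  %d\n", rcu_seq_done(&seq, s)); /* 1 */
	return 0;
}
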
@@ -661,7 +661,6 @@ TRACE_EVENT(rcu_torture_read,
  * Tracepoint for _rcu_barrier() execution. The string "s" describes
  * the _rcu_barrier phase:
  *	"Begin": _rcu_barrier() started.
- *	"Check": _rcu_barrier() checking for piggybacking.
  *	"EarlyExit": _rcu_barrier() piggybacked, thus early exit.
  *	"Inc1": _rcu_barrier() piggyback check counter incremented.
  *	"OfflineNoCB": _rcu_barrier() found callback on never-online CPU
@@ -3568,10 +3568,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 	struct rcu_state *rsp = rdp->rsp;

 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
+		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
 		complete(&rsp->barrier_completion);
 	} else {
-		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+		_rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
 	}
 }
@@ -3583,7 +3583,7 @@ static void rcu_barrier_func(void *type)
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

-	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
+	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
 	atomic_inc(&rsp->barrier_cpu_count);
 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
@@ -3596,55 +3596,24 @@ static void _rcu_barrier(struct rcu_state *rsp)
 {
 	int cpu;
 	struct rcu_data *rdp;
-	unsigned long snap = READ_ONCE(rsp->n_barrier_done);
-	unsigned long snap_done;
+	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);

-	_rcu_barrier_trace(rsp, "Begin", -1, snap);
+	_rcu_barrier_trace(rsp, "Begin", -1, s);

 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rsp->barrier_mutex);

-	/*
-	 * Ensure that all prior references, including to ->n_barrier_done,
-	 * are ordered before the _rcu_barrier() machinery.
-	 */
-	smp_mb(); /* See above block comment. */
-
-	/*
-	 * Recheck ->n_barrier_done to see if others did our work for us.
-	 * This means checking ->n_barrier_done for an even-to-odd-to-even
-	 * transition. The "if" expression below therefore rounds the old
-	 * value up to the next even number and adds two before comparing.
-	 */
-	snap_done = rsp->n_barrier_done;
-	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
-
-	/*
-	 * If the value in snap is odd, we needed to wait for the current
-	 * rcu_barrier() to complete, then wait for the next one, in other
-	 * words, we need the value of snap_done to be three larger than
-	 * the value of snap. On the other hand, if the value in snap is
-	 * even, we only had to wait for the next rcu_barrier() to complete,
-	 * in other words, we need the value of snap_done to be only two
-	 * greater than the value of snap. The "(snap + 3) & ~0x1" computes
-	 * this for us (thank you, Linus!).
-	 */
-	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
-		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+	/* Did someone else do our work for us? */
+	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
+		_rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rsp->barrier_mutex);
 		return;
 	}

-	/*
-	 * Increment ->n_barrier_done to avoid duplicate work. Use
-	 * WRITE_ONCE() to prevent the compiler from speculating
-	 * the increment to precede the early-exit check.
-	 */
-	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
-	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
-	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
-	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
+	/* Mark the start of the barrier operation. */
+	rcu_seq_start(&rsp->barrier_sequence);
+	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);

 	/*
 	 * Initialize the count to one rather than to zero in order to
@@ -3668,10 +3637,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 		if (rcu_is_nocb_cpu(cpu)) {
 			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
 				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
-						   rsp->n_barrier_done);
+						   rsp->barrier_sequence);
 			} else {
 				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-						   rsp->n_barrier_done);
+						   rsp->barrier_sequence);
 				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
 				__call_rcu(&rdp->barrier_head,
@@ -3679,11 +3648,11 @@ static void _rcu_barrier(struct rcu_state *rsp)
 			}
 		} else if (READ_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
-					   rsp->n_barrier_done);
+					   rsp->barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
 		} else {
 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
-					   rsp->n_barrier_done);
+					   rsp->barrier_sequence);
 		}
 	}
 	put_online_cpus();
@@ -3695,16 +3664,13 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
 		complete(&rsp->barrier_completion);

-	/* Increment ->n_barrier_done to prevent duplicate work. */
-	smp_mb(); /* Keep increment after above mechanism. */
-	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
-	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
-	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
-	smp_mb(); /* Keep increment before caller's subsequent code. */
-
 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
 	wait_for_completion(&rsp->barrier_completion);

+	/* Mark the end of the barrier operation. */
+	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
+	rcu_seq_end(&rsp->barrier_sequence);
+
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rsp->barrier_mutex);
 }
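To make the piggybacking check concrete, here is the arithmetic that the removed comments spell out and that rcu_seq_snap()/rcu_seq_done() now encapsulate. If the sequence counter reads 4 (even, no barrier in flight) when a caller takes its snapshot, the snapshot is (4 + 3) & ~0x1 = 6, so the caller may return early once the counter reaches 6, that is, after one further complete start/end pair driven by some other caller. If the counter instead reads 5 (odd, a barrier already in flight), the snapshot is (5 + 3) & ~0x1 = 8, so both the in-flight barrier and one subsequent barrier must complete first: the even-to-odd-to-even transition the old comments describe.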
@@ -486,7 +486,7 @@ struct rcu_state {
 	struct mutex barrier_mutex;		/* Guards barrier fields. */
 	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
 	struct completion barrier_completion;	/* Wake at barrier end. */
-	unsigned long n_barrier_done;		/* ++ at start and end of */
+	unsigned long barrier_sequence;		/* ++ at start and end of */
 						/*  _rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
@@ -81,9 +81,9 @@ static void r_stop(struct seq_file *m, void *v)
 static int show_rcubarrier(struct seq_file *m, void *v)
 {
 	struct rcu_state *rsp = (struct rcu_state *)m->private;

-	seq_printf(m, "bcc: %d nbd: %lu\n",
+	seq_printf(m, "bcc: %d bseq: %lu\n",
 		   atomic_read(&rsp->barrier_cpu_count),
-		   rsp->n_barrier_done);
+		   rsp->barrier_sequence);
 	return 0;
 }