Commit 17ef2fe9 authored by Paul E. McKenney

rcu: Make rcutorture's batches-completed API use ->gp_seq

The rcutorture test invokes rcu_batches_started(),
rcu_batches_completed(), rcu_batches_started_bh(),
rcu_batches_completed_bh(), rcu_batches_started_sched(), and
rcu_batches_completed_sched() to do grace-period consistency checks,
and rcuperf uses the _completed variants for statistics.
These functions use ->gpnum and ->completed.  This commit therefore
replaces them with rcu_get_gp_seq(), rcu_bh_get_gp_seq(), and
rcu_sched_get_gp_seq(), adjusting rcutorture and rcuperf to make
use of them.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent dee4f422
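The grace-period consistency checks mentioned above amount to snapshotting a flavor's grace-period counter before and after an operation and verifying that it advanced. A minimal sketch of that caller-side pattern with the new accessors (the accessor name is the one introduced by this commit; the surrounding function is illustrative only, assuming ordinary kernel context, and is not part of the patch):

/* Sketch only: snapshot the gp_seq-derived counter around a grace period. */
static void check_gp_advanced(void)
{
	unsigned long gp_before, gp_after;

	gp_before = rcu_get_gp_seq();	/* counter portion of ->gp_seq */
	synchronize_rcu();		/* wait for at least one full grace period */
	gp_after = rcu_get_gp_seq();

	/* rcutorture's consistency checks rely on the counter advancing here. */
	WARN_ON_ONCE(gp_after == gp_before);
}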
@@ -463,12 +463,9 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type,
 #endif
 #ifdef CONFIG_TINY_RCU
-static inline unsigned long rcu_batches_started(void) { return 0; }
-static inline unsigned long rcu_batches_started_bh(void) { return 0; }
-static inline unsigned long rcu_batches_started_sched(void) { return 0; }
-static inline unsigned long rcu_batches_completed(void) { return 0; }
-static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
-static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
+static inline unsigned long rcu_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
 static inline unsigned long
@@ -480,12 +477,9 @@ static inline void show_rcu_gp_kthreads(void) { }
 #else /* #ifdef CONFIG_TINY_RCU */
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_get_gp_seq(void);
+unsigned long rcu_bh_get_gp_seq(void);
+unsigned long rcu_sched_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
 unsigned long rcu_exp_batches_completed_sched(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
...
@@ -138,8 +138,7 @@ struct rcu_perf_ops {
 void (*cleanup)(void);
 int (*readlock)(void);
 void (*readunlock)(int idx);
-unsigned long (*started)(void);
-unsigned long (*completed)(void);
+unsigned long (*get_gp_seq)(void);
 unsigned long (*exp_completed)(void);
 void (*async)(struct rcu_head *head, rcu_callback_t func);
 void (*gp_barrier)(void);
@@ -179,8 +178,7 @@ static struct rcu_perf_ops rcu_ops = {
 .init = rcu_sync_perf_init,
 .readlock = rcu_perf_read_lock,
 .readunlock = rcu_perf_read_unlock,
-.started = rcu_batches_started,
-.completed = rcu_batches_completed,
+.get_gp_seq = rcu_get_gp_seq,
 .exp_completed = rcu_exp_batches_completed,
 .async = call_rcu,
 .gp_barrier = rcu_barrier,
@@ -209,8 +207,7 @@ static struct rcu_perf_ops rcu_bh_ops = {
 .init = rcu_sync_perf_init,
 .readlock = rcu_bh_perf_read_lock,
 .readunlock = rcu_bh_perf_read_unlock,
-.started = rcu_batches_started_bh,
-.completed = rcu_batches_completed_bh,
+.get_gp_seq = rcu_bh_get_gp_seq,
 .exp_completed = rcu_exp_batches_completed_sched,
 .async = call_rcu_bh,
 .gp_barrier = rcu_barrier_bh,
@@ -266,8 +263,7 @@ static struct rcu_perf_ops srcu_ops = {
 .init = rcu_sync_perf_init,
 .readlock = srcu_perf_read_lock,
 .readunlock = srcu_perf_read_unlock,
-.started = NULL,
-.completed = srcu_perf_completed,
+.get_gp_seq = srcu_perf_completed,
 .exp_completed = srcu_perf_completed,
 .async = srcu_call_rcu,
 .gp_barrier = srcu_rcu_barrier,
@@ -295,8 +291,7 @@ static struct rcu_perf_ops srcud_ops = {
 .cleanup = srcu_sync_perf_cleanup,
 .readlock = srcu_perf_read_lock,
 .readunlock = srcu_perf_read_unlock,
-.started = NULL,
-.completed = srcu_perf_completed,
+.get_gp_seq = srcu_perf_completed,
 .exp_completed = srcu_perf_completed,
 .async = srcu_call_rcu,
 .gp_barrier = srcu_rcu_barrier,
@@ -325,8 +320,7 @@ static struct rcu_perf_ops sched_ops = {
 .init = rcu_sync_perf_init,
 .readlock = sched_perf_read_lock,
 .readunlock = sched_perf_read_unlock,
-.started = rcu_batches_started_sched,
-.completed = rcu_batches_completed_sched,
+.get_gp_seq = rcu_sched_get_gp_seq,
 .exp_completed = rcu_exp_batches_completed_sched,
 .async = call_rcu_sched,
 .gp_barrier = rcu_barrier_sched,
@@ -353,8 +347,7 @@ static struct rcu_perf_ops tasks_ops = {
 .init = rcu_sync_perf_init,
 .readlock = tasks_perf_read_lock,
 .readunlock = tasks_perf_read_unlock,
-.started = rcu_no_completed,
-.completed = rcu_no_completed,
+.get_gp_seq = rcu_no_completed,
 .async = call_rcu_tasks,
 .gp_barrier = rcu_barrier_tasks,
 .sync = synchronize_rcu_tasks,
@@ -447,8 +440,7 @@ rcu_perf_writer(void *arg)
 b_rcu_perf_writer_started =
 cur_ops->exp_completed() / 2;
 } else {
-b_rcu_perf_writer_started =
-cur_ops->completed();
+b_rcu_perf_writer_started = cur_ops->get_gp_seq();
 }
 }
@@ -505,7 +497,7 @@ rcu_perf_writer(void *arg)
 cur_ops->exp_completed() / 2;
 } else {
 b_rcu_perf_writer_finished =
-cur_ops->completed();
+cur_ops->get_gp_seq();
 }
 if (shutdown) {
 smp_mb(); /* Assign before wake. */
...
@@ -264,8 +264,7 @@ struct rcu_torture_ops {
 int (*readlock)(void);
 void (*read_delay)(struct torture_random_state *rrsp);
 void (*readunlock)(int idx);
-unsigned long (*started)(void);
-unsigned long (*completed)(void);
+unsigned long (*get_gp_seq)(void);
 void (*deferred_free)(struct rcu_torture *p);
 void (*sync)(void);
 void (*exp_sync)(void);
@@ -305,10 +304,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
 * force_quiescent_state. */
 if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
-started = cur_ops->completed();
+started = cur_ops->get_gp_seq();
 ts = rcu_trace_clock_local();
 mdelay(longdelay_ms);
-completed = cur_ops->completed();
+completed = cur_ops->get_gp_seq();
 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
 started, completed);
 }
@@ -400,8 +399,7 @@ static struct rcu_torture_ops rcu_ops = {
 .readlock = rcu_torture_read_lock,
 .read_delay = rcu_read_delay,
 .readunlock = rcu_torture_read_unlock,
-.started = rcu_batches_started,
-.completed = rcu_batches_completed,
+.get_gp_seq = rcu_get_gp_seq,
 .deferred_free = rcu_torture_deferred_free,
 .sync = synchronize_rcu,
 .exp_sync = synchronize_rcu_expedited,
@@ -442,8 +440,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
 .readlock = rcu_bh_torture_read_lock,
 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
 .readunlock = rcu_bh_torture_read_unlock,
-.started = rcu_batches_started_bh,
-.completed = rcu_batches_completed_bh,
+.get_gp_seq = rcu_bh_get_gp_seq,
 .deferred_free = rcu_bh_torture_deferred_free,
 .sync = synchronize_rcu_bh,
 .exp_sync = synchronize_rcu_bh_expedited,
@@ -486,8 +483,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
 .readlock = rcu_torture_read_lock,
 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
 .readunlock = rcu_torture_read_unlock,
-.started = rcu_no_completed,
-.completed = rcu_no_completed,
+.get_gp_seq = rcu_no_completed,
 .deferred_free = rcu_busted_torture_deferred_free,
 .sync = synchronize_rcu_busted,
 .exp_sync = synchronize_rcu_busted,
@@ -575,8 +571,7 @@ static struct rcu_torture_ops srcu_ops = {
 .readlock = srcu_torture_read_lock,
 .read_delay = srcu_read_delay,
 .readunlock = srcu_torture_read_unlock,
-.started = NULL,
-.completed = srcu_torture_completed,
+.get_gp_seq = srcu_torture_completed,
 .deferred_free = srcu_torture_deferred_free,
 .sync = srcu_torture_synchronize,
 .exp_sync = srcu_torture_synchronize_expedited,
@@ -613,8 +608,7 @@ static struct rcu_torture_ops srcud_ops = {
 .readlock = srcu_torture_read_lock,
 .read_delay = srcu_read_delay,
 .readunlock = srcu_torture_read_unlock,
-.started = NULL,
-.completed = srcu_torture_completed,
+.get_gp_seq = srcu_torture_completed,
 .deferred_free = srcu_torture_deferred_free,
 .sync = srcu_torture_synchronize,
 .exp_sync = srcu_torture_synchronize_expedited,
@@ -651,8 +645,7 @@ static struct rcu_torture_ops sched_ops = {
 .readlock = sched_torture_read_lock,
 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
 .readunlock = sched_torture_read_unlock,
-.started = rcu_batches_started_sched,
-.completed = rcu_batches_completed_sched,
+.get_gp_seq = rcu_sched_get_gp_seq,
 .deferred_free = rcu_sched_torture_deferred_free,
 .sync = synchronize_sched,
 .exp_sync = synchronize_sched_expedited,
@@ -690,8 +683,7 @@ static struct rcu_torture_ops tasks_ops = {
 .readlock = tasks_torture_read_lock,
 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
 .readunlock = tasks_torture_read_unlock,
-.started = rcu_no_completed,
-.completed = rcu_no_completed,
+.get_gp_seq = rcu_no_completed,
 .deferred_free = rcu_tasks_torture_deferred_free,
 .sync = synchronize_rcu_tasks,
 .exp_sync = synchronize_rcu_tasks,
@@ -1104,10 +1096,7 @@ static void rcu_torture_timer(struct timer_list *unused)
 unsigned long long ts;
 idx = cur_ops->readlock();
-if (cur_ops->started)
-started = cur_ops->started();
-else
-started = cur_ops->completed();
+started = cur_ops->get_gp_seq();
 ts = rcu_trace_clock_local();
 p = rcu_dereference_check(rcu_torture_current,
 rcu_read_lock_bh_held() ||
@@ -1131,7 +1120,7 @@ static void rcu_torture_timer(struct timer_list *unused)
 /* Should not happen, but... */
 pipe_count = RCU_TORTURE_PIPE_LEN;
 }
-completed = cur_ops->completed();
+completed = cur_ops->get_gp_seq();
 if (pipe_count > 1) {
 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
 started, completed);
@@ -1139,8 +1128,8 @@ static void rcu_torture_timer(struct timer_list *unused)
 }
 __this_cpu_inc(rcu_torture_count[pipe_count]);
 completed = completed - started;
-if (cur_ops->started)
-completed++;
+if (completed > ULONG_MAX >> 1)
+completed = 0; /* Not all gp_seq have full range. */
 if (completed > RCU_TORTURE_PIPE_LEN) {
 /* Should not happen, but... */
 completed = RCU_TORTURE_PIPE_LEN;
@@ -1187,10 +1176,7 @@ rcu_torture_reader(void *arg)
 mod_timer(&t, jiffies + 1);
 }
 idx = cur_ops->readlock();
-if (cur_ops->started)
-started = cur_ops->started();
-else
-started = cur_ops->completed();
+started = cur_ops->get_gp_seq();
 ts = rcu_trace_clock_local();
 p = rcu_dereference_check(rcu_torture_current,
 rcu_read_lock_bh_held() ||
@@ -1212,7 +1198,7 @@ rcu_torture_reader(void *arg)
 /* Should not happen, but... */
 pipe_count = RCU_TORTURE_PIPE_LEN;
 }
-completed = cur_ops->completed();
+completed = cur_ops->get_gp_seq();
 if (pipe_count > 1) {
 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
 ts, started, completed);
@@ -1220,8 +1206,8 @@ rcu_torture_reader(void *arg)
 }
 __this_cpu_inc(rcu_torture_count[pipe_count]);
 completed = completed - started;
-if (cur_ops->started)
-completed++;
+if (completed > ULONG_MAX >> 1)
+completed = 0; /* Not all gp_seq have full range. */
 if (completed > RCU_TORTURE_PIPE_LEN) {
 /* Should not happen, but... */
 completed = RCU_TORTURE_PIPE_LEN;
...
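With the old API, the reader-side delta computation above added one when a separate ->started() hook existed; with a single gp_seq-derived value that adjustment is gone, and an implausibly large delta is instead treated as zero, since not every flavor's sequence counter spans the full unsigned long range. A minimal sketch of that clamping, factored into a helper purely for illustration (the helper name does not exist in the kernel):

/* Illustrative only: clamp a gp_seq delta the way the reader paths above do. */
static unsigned long torture_gp_delta(unsigned long started, unsigned long completed)
{
	unsigned long delta = completed - started;

	if (delta > ULONG_MAX >> 1)
		delta = 0;	/* Not all gp_seq have full range. */
	if (delta > RCU_TORTURE_PIPE_LEN)
		delta = RCU_TORTURE_PIPE_LEN;	/* Should not happen, but... */
	return delta;
}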
@@ -530,58 +530,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 /*
- * Return the number of RCU batches started thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started(void)
+unsigned long rcu_get_gp_seq(void)
 {
-return rcu_state_p->gpnum;
+return rcu_seq_ctr(READ_ONCE(rcu_state_p->gp_seq));
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started);
+EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 /*
- * Return the number of RCU-sched batches started thus far for debug & stats.
+ * Return the number of RCU-sched GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_sched(void)
+unsigned long rcu_sched_get_gp_seq(void)
 {
-return rcu_sched_state.gpnum;
+return rcu_seq_ctr(READ_ONCE(rcu_sched_state.gp_seq));
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 /*
- * Return the number of RCU BH batches started thus far for debug & stats.
+ * Return the number of RCU-bh GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_bh(void)
+unsigned long rcu_bh_get_gp_seq(void)
 {
-return rcu_bh_state.gpnum;
+return rcu_seq_ctr(READ_ONCE(rcu_bh_state.gp_seq));
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
-/*
- * Return the number of RCU batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
-return rcu_state_p->completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-/*
- * Return the number of RCU-sched batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_sched(void)
-{
-return rcu_sched_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-/*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_bh(void)
-{
-return rcu_bh_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 /*
  * Return the number of RCU expedited batches completed thus far for
...
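Note that the new getters do not hand back the raw ->gp_seq word: they read it with READ_ONCE() and strip the low-order grace-period state bits with rcu_seq_ctr(), so callers see a plain count of grace periods. For reference, the helper in kernel/rcu/rcu.h is essentially the following (reproduced here only for context; the header is the authoritative definition):

#define RCU_SEQ_CTR_SHIFT	2	/* low bits carry grace-period state */

static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;	/* drop the state bits, keep the count */
}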