Commit cee43939 authored by Paul E. McKenney

rcu: Rename cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs()

Commit e31d28b6 ("trace: Eliminate cond_resched_rcu_qs() in favor
of cond_resched()") substituted cond_resched() for the earlier call
to cond_resched_rcu_qs().  However, the new-age cond_resched() does
not do anything to help RCU-tasks grace periods because (1) RCU-tasks
is only enabled when CONFIG_PREEMPT=y and (2) cond_resched() is a
complete no-op when preemption is enabled.  This situation results
in hangs when running the trace benchmarks.
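
For reference, the no-op behavior follows from the way cond_resched()
is defined.  A simplified sketch of the include/linux/sched.h logic of
this era (paraphrased, not the verbatim source):

	#ifndef CONFIG_PREEMPT
	extern int _cond_resched(void);	/* may reschedule; nonzero if it did */
	#else
	static inline int _cond_resched(void) { return 0; }	/* no-op */
	#endif

	#define cond_resched() ({			\
		___might_sleep(__FILE__, __LINE__, 0);	\
		_cond_resched();			\
	})

Because _cond_resched() unconditionally returns zero when
CONFIG_PREEMPT=y, a kthread looping on cond_resched() alone never
reports a voluntary context switch, so RCU-tasks (which is built only
for such kernels) never observes a quiescent state from that thread.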

A number of potential fixes were discussed on LKML
(https://lkml.kernel.org/r/20180224151240.0d63a059@vmware.local.home),
including making cond_resched() not be a no-op; making cond_resched()
not be a no-op, but only when running tracing benchmarks; reverting
the aforementioned commit (which works because cond_resched_rcu_qs()
does provide an RCU-tasks quiescent state); and adding a call to the
scheduler/RCU rcu_note_voluntary_context_switch() function.  All were
deemed unsatisfactory, either due to added cond_resched() overhead or
due to magic functions inviting cargo culting.

This commit renames cond_resched_rcu_qs() to cond_resched_tasks_rcu_qs(),
which provides a clear hint as to what this function is doing and
why and where it should be used, and then replaces the call to
cond_resched() with cond_resched_tasks_rcu_qs() in the trace benchmark's
benchmark_event_kthread() function.
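
To illustrate the resulting usage pattern, here is a minimal sketch of
a long-running kthread loop (do_work() is a hypothetical stand-in for
the benchmark body, not code from this commit):

	static int example_kthread(void *arg)
	{
		while (!kthread_should_stop()) {
			do_work();	/* hypothetical per-iteration work */
			/*
			 * Unlike bare cond_resched(), this also reports an
			 * RCU-tasks quiescent state on CONFIG_PREEMPT=y
			 * kernels, keeping synchronize_rcu_tasks() from
			 * blocking indefinitely on this thread.
			 */
			cond_resched_tasks_rcu_qs();
		}
		return 0;
	}
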
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
parent 6fba2b37
@@ -188,13 +188,13 @@ static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
- * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
  *
  * This macro resembles cond_resched(), except that it is defined to
  * report potential quiescent states to RCU-tasks even if the cond_resched()
  * machinery were to be shut off, as some advocate for PREEMPT kernels.
  */
-#define cond_resched_rcu_qs() \
+#define cond_resched_tasks_rcu_qs() \
 do { \
 	if (!cond_resched()) \
 		rcu_note_voluntary_context_switch_lite(current); \
...
@@ -369,7 +369,7 @@ static bool __maybe_unused torturing_tasks(void)
  */
 static void rcu_perf_wait_shutdown(void)
 {
-	cond_resched_rcu_qs();
+	cond_resched_tasks_rcu_qs();
 	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
 		return;
 	while (!torture_must_stop())
...
@@ -1234,10 +1234,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	}
 
 	/*
-	 * Has this CPU encountered a cond_resched_rcu_qs() since the
-	 * beginning of the grace period? For this to be the case,
-	 * the CPU has to have noticed the current grace period. This
-	 * might not be the case for nohz_full CPUs looping in the kernel.
+	 * Has this CPU encountered a cond_resched() since the beginning
+	 * of the grace period? For this to be the case, the CPU has to
+	 * have noticed the current grace period. This might not be the
+	 * case for nohz_full CPUs looping in the kernel.
 	 */
 	jtsq = jiffies_till_sched_qs;
 	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
@@ -2049,7 +2049,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq_rcu_node(rnp);
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
@@ -2151,7 +2151,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq_rcu_node(rnp);
 		rcu_nocb_gp_cleanup(sq);
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		rcu_gp_slow(rsp, gp_cleanup_delay);
 	}
@@ -2202,7 +2202,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
-			cond_resched_rcu_qs();
+			cond_resched_tasks_rcu_qs();
 			WRITE_ONCE(rsp->gp_activity, jiffies);
 			WARN_ON(signal_pending(current));
 			trace_rcu_grace_period(rsp->name,
@@ -2247,7 +2247,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gpnum),
 						       TPS("fqsend"));
-				cond_resched_rcu_qs();
+				cond_resched_tasks_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
 				ret = 0; /* Force full wait till next FQS. */
 				j = jiffies_till_next_fqs;
@@ -2260,7 +2260,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 				}
 			} else {
 				/* Deal with stray signal. */
-				cond_resched_rcu_qs();
+				cond_resched_tasks_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
@@ -2782,7 +2782,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rsp, rnp) {
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {
...
@@ -1598,7 +1598,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
-		cond_resched_rcu_qs();
+		cond_resched_tasks_rcu_qs();
 	}
 
 	/* Unconditionally decrement: no need to wake ourselves up. */
@@ -2227,7 +2227,7 @@ static int rcu_nocb_kthread(void *arg)
 			cl++;
 			c++;
 			local_bh_enable();
-			cond_resched_rcu_qs();
+			cond_resched_tasks_rcu_qs();
 			list = next;
 		}
 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
...
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
  * grace period has elapsed, in other words after all currently
  * executing rcu-tasks read-side critical sections have elapsed. These
  * read-side critical sections are delimited by calls to schedule(),
- * cond_resched_rcu_qs(), idle execution, userspace execution, calls
+ * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
  *
  * This is a very specialized primitive, intended only for a few uses in
...
@@ -574,7 +574,7 @@ void stutter_wait(const char *title)
 {
 	int spt;
 
-	cond_resched_rcu_qs();
+	cond_resched_tasks_rcu_qs();
 	spt = READ_ONCE(stutter_pause_test);
 	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
 		if (spt == 1) {
...
@@ -159,13 +159,13 @@ static int benchmark_event_kthread(void *arg)
 		 * wants to run, schedule in, but if the CPU is idle,
 		 * we'll keep burning cycles.
 		 *
-		 * Note the _rcu_qs() version of cond_resched() will
+		 * Note the tasks_rcu_qs() version of cond_resched() will
 		 * notify synchronize_rcu_tasks() that this thread has
 		 * passed a quiescent state for rcu_tasks. Otherwise
 		 * this thread will never voluntarily schedule which would
 		 * block synchronize_rcu_tasks() indefinitely.
 		 */
-		cond_resched();
+		cond_resched_tasks_rcu_qs();
 	}
 
 	return 0;
...