Commit d283aa1b authored by Paul E. McKenney

rcu: Mark accesses in tree_stall.h

This commit marks the accesses in tree_stall.h, both to avoid
undesirable compiler optimizations and to keep KCSAN focused on the
accesses of the core algorithm.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent f74126dc
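
Note: the pattern applied throughout the diff below wraps each diagnostic read in
data_race(READ_ONCE(...)). READ_ONCE() forces a single, untorn load and keeps the
compiler from re-reading or caching the value; data_race() tells KCSAN that the
remaining race is intentional, so its reports stay focused on the core-algorithm
accesses. The snippet below is a minimal user-space sketch of that idiom, assuming
simplified stand-in macros and a hypothetical struct; it is not the kernel's actual
implementation.

/*
 * Minimal user-space sketch of the marking idiom used in this commit.
 * The READ_ONCE() and data_race() definitions here are simplified
 * stand-ins, NOT the kernel's real macros (the kernel's data_race()
 * additionally exempts the access from KCSAN instrumentation).
 */
#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))
#define data_race(expr)	(expr)	/* real macro also marks the access for KCSAN */

/* Hypothetical stand-in for a couple of rcu_state fields read by stall warnings. */
struct gp_state_sketch {
	unsigned long gp_start;
	unsigned int gp_flags;
};

static struct gp_state_sketch gs = { .gp_start = 300UL, .gp_flags = 0x1 };

int main(void)
{
	/*
	 * Diagnostic read of fields another CPU may be updating concurrently:
	 * READ_ONCE() prevents load tearing and re-reads, data_race()
	 * documents that any remaining race is harmless.
	 */
	printf("->gp_start %lu ->gp_flags %#x\n",
	       data_race(READ_ONCE(gs.gp_start)),
	       data_race(READ_ONCE(gs.gp_flags)));
	return 0;
}
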
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -465,9 +465,10 @@ static void rcu_check_gp_kthread_starvation(void)
 		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
 		       rcu_state.name, j,
 		       (long)rcu_seq_current(&rcu_state.gp_seq),
-		       data_race(rcu_state.gp_flags),
-		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-		       gpk ? gpk->__state : ~0, cpu);
+		       data_race(READ_ONCE(rcu_state.gp_flags)),
+		       gp_state_getname(rcu_state.gp_state),
+		       data_race(READ_ONCE(rcu_state.gp_state)),
+		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
 		if (gpk) {
 			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
 			pr_err("RCU grace-period kthread stack dump:\n");
@@ -510,7 +511,7 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void)
 			(long)rcu_seq_current(&rcu_state.gp_seq),
 			data_race(rcu_state.gp_flags),
 			gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
-			gpk->__state);
+			data_race(READ_ONCE(gpk->__state)));
 		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
 		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
 	}
@@ -569,11 +570,11 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 			pr_err("INFO: Stall ended before state dump start\n");
 		} else {
 			j = jiffies;
-			gpa = data_race(rcu_state.gp_activity);
+			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
 			       rcu_state.name, j - gpa, j, gpa,
-			       data_race(jiffies_till_next_fqs),
-			       rcu_get_root()->qsmask);
+			       data_race(READ_ONCE(jiffies_till_next_fqs)),
+			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
 		}
 	}
 	/* Rewrite if needed in case of slow consoles. */
@@ -815,32 +816,34 @@ void show_rcu_gp_kthreads(void)
 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
 	j = jiffies;
-	ja = j - data_race(rcu_state.gp_activity);
-	jr = j - data_race(rcu_state.gp_req_activity);
-	js = j - data_race(rcu_state.gp_start);
-	jw = j - data_race(rcu_state.gp_wake_time);
+	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
+	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
+	js = j - data_race(READ_ONCE(rcu_state.gp_start));
+	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
 	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
 		rcu_state.name, gp_state_getname(rcu_state.gp_state),
-		rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
-		js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
-		(long)data_race(rcu_state.gp_seq),
-		(long)data_race(rcu_get_root()->gp_seq_needed),
-		data_race(rcu_state.gp_max),
-		data_race(rcu_state.gp_flags));
+		data_race(READ_ONCE(rcu_state.gp_state)),
+		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
+		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
+		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
+		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
+		data_race(READ_ONCE(rcu_state.gp_max)),
+		data_race(READ_ONCE(rcu_state.gp_flags)));
 	rcu_for_each_node_breadth_first(rnp) {
 		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
-		    !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
-		    !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
+		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
+		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
 			continue;
 		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
 			rnp->grplo, rnp->grphi,
-			(long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
-			data_race(rnp->qsmask),
-			".b"[!!data_race(rnp->boost_kthread_task)],
-			".B"[!!data_race(rnp->boost_tasks)],
-			".E"[!!data_race(rnp->exp_tasks)],
-			".G"[!!data_race(rnp->gp_tasks)],
-			data_race(rnp->n_boosts));
+			(long)data_race(READ_ONCE(rnp->gp_seq)),
+			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
+			data_race(READ_ONCE(rnp->qsmask)),
+			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
+			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
+			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
+			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
+			data_race(READ_ONCE(rnp->n_boosts)));
 		if (!rcu_is_leaf_node(rnp))
 			continue;
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -850,12 +853,12 @@ void show_rcu_gp_kthreads(void)
 					 READ_ONCE(rdp->gp_seq_needed)))
 				continue;
 			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-				cpu, (long)data_race(rdp->gp_seq_needed));
+				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
 		}
 	}
 	for_each_possible_cpu(cpu) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		cbs += data_race(rdp->n_cbs_invoked);
+		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
 		if (rcu_segcblist_is_offloaded(&rdp->cblist))
 			show_rcu_nocb_state(rdp);
 	}
@@ -937,11 +940,11 @@ void rcu_fwd_progress_check(unsigned long j)
 
 	if (rcu_gp_in_progress()) {
 		pr_info("%s: GP age %lu jiffies\n",
-			__func__, jiffies - rcu_state.gp_start);
+			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
 		show_rcu_gp_kthreads();
 	} else {
 		pr_info("%s: Last GP end %lu jiffies ago\n",
-			__func__, jiffies - rcu_state.gp_end);
+			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
 		preempt_disable();
 		rdp = this_cpu_ptr(&rcu_data);
 		rcu_check_gp_start_stall(rdp->mynode, rdp, j);