Commit abb06b99 authored by Paul E. McKenney

rcu: Pull rcu_sched_qs_mask into rcu_dynticks structure

The rcu_sched_qs_mask variable is yet another isolated per-CPU variable,
so this commit pulls it into the pre-existing rcu_dynticks per-CPU
structure.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 88a4976d
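For readers new to the per-CPU API, the conversion below is mechanical: a standalone DEFINE_PER_CPU() variable becomes a field of an existing per-CPU structure, and the accessor macros take a struct-member expression instead of a bare variable name. Here is a minimal sketch of the two forms; the my_flag and my_dynticks names are hypothetical stand-ins for rcu_sched_qs_mask and rcu_dynticks:

#include <linux/percpu.h>
#include <linux/atomic.h>

/* Before: an isolated per-CPU flag (deleted by a conversion like this one). */
static DEFINE_PER_CPU(int, my_flag);

/* After: the flag lives in a pre-existing per-CPU structure. */
struct my_dynticks {
	atomic_t dynticks;	/* Pre-existing field. */
	int my_flag;		/* Newly pulled-in flag. */
};
static DEFINE_PER_CPU(struct my_dynticks, my_dynticks);

static int my_accessor_examples(int cpu)
{
	int v;

	v  = raw_cpu_read(my_flag);		/* Old form: bare variable. */
	v += raw_cpu_read(my_dynticks.my_flag);	/* New form: member expression. */
	raw_cpu_write(my_dynticks.my_flag, 0);	/* Current CPU's instance. */
	v += per_cpu(my_dynticks.my_flag, cpu);	/* A given CPU's instance. */
	return v;
}

One motivation for such consolidations is locality: fields that are accessed together land in the same per-CPU cache lines, and there is one less global symbol to track.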
Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -1104,6 +1104,7 @@ Its fields are as follows:
   1   int dynticks_nesting;
   2   int dynticks_nmi_nesting;
   3   atomic_t dynticks;
+  4   int rcu_sched_qs_mask;
 </pre>
 
 <p>The <tt>-&gt;dynticks_nesting</tt> field counts the
@@ -1117,11 +1118,17 @@ NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
 field, except that NMIs that interrupt non-dyntick-idle execution
 are not counted.
 
-</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
+</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
 CPU's transitions to and from dyntick-idle mode, so that this counter
 has an even value when the CPU is in dyntick-idle mode and an odd
 value otherwise.
 
+</p><p>Finally, the <tt>-&gt;rcu_sched_qs_mask</tt> field is used
+to record the fact that the RCU core code would really like to
+see a quiescent state from the corresponding CPU.
+This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
+code, which provide a momentary idle sojourn in response.
+
 <table>
 <tr><th>&nbsp;</th></tr>
 <tr><th align="left">Quick Quiz:</th></tr>
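The "momentary idle sojourn" mentioned in the new documentation paragraph is not visible in the hunks below, which only read and clear the flag. When the flag is found set, rcu_momentary_dyntick_idle() responds by advancing the CPU's ->dynticks counter by two, so that the grace-period machinery observes a changed counter and credits the CPU with a quiescent state. A condensed sketch of that path, adapted from the tree.c of this era with the per-flavor bookkeeping omitted:

static void momentary_idle_sketch(void)
{
	struct rcu_dynticks *rdtp;

	/* Losing a racing flag-set is fine: it will simply be set again. */
	if (!raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask))
		return;
	raw_cpu_write(rcu_dynticks.rcu_sched_qs_mask, 0);

	/* Emulate a trip through idle: bump ->dynticks by two so that
	 * rcu_implicit_dynticks_qs() sees the counter change. */
	rdtp = this_cpu_ptr(&rcu_dynticks);
	smp_mb__before_atomic();	/* Order prior accesses before the QS. */
	atomic_add(2, &rdtp->dynticks);	/* The quiescent state itself. */
	smp_mb__after_atomic();		/* Order later accesses after the QS. */
}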
kernel/rcu/tree.c
@@ -272,8 +272,6 @@ void rcu_bh_qs(void)
 	}
 }
 
-static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-
 /*
  * Steal a bit from the bottom of ->dynticks for idle entry/exit
  * control.  Initially this is for TLB flushing.
@@ -464,8 +462,8 @@ static void rcu_momentary_dyntick_idle(void)
 	 * Yes, we can lose flag-setting operations.  This is OK, because
 	 * the flag will be set again after some delay.
 	 */
-	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
-	raw_cpu_write(rcu_sched_qs_mask, 0);
+	resched_mask = raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask);
+	raw_cpu_write(rcu_dynticks.rcu_sched_qs_mask, 0);
 
 	/* Find the flavor that needs a quiescent state. */
 	for_each_rcu_flavor(rsp) {
@@ -499,7 +497,7 @@ void rcu_note_context_switch(void)
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
 	rcu_preempt_note_context_switch();
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -524,7 +522,7 @@ void rcu_all_qs(void)
 	unsigned long flags;
 
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask))) {
 		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
@@ -1351,7 +1349,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 * is set too high, we override with half of the RCU CPU stall
 	 * warning delay.
 	 */
-	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+	rcrmp = &per_cpu(rcu_dynticks.rcu_sched_qs_mask, rdp->cpu);
 	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
 	    time_after(jiffies, rdp->rsp->jiffies_resched)) {
 		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
kernel/rcu/tree.h
@@ -113,6 +113,7 @@ struct rcu_dynticks {
 					    /* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;	    /* Track NMI nesting level. */
 	atomic_t dynticks;		    /* Even value for idle, else odd. */
+	int rcu_sched_qs_mask;		    /* GP old, need quiescent state. */
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	long long dynticks_idle_nesting;
 					    /* irq/process nesting level from idle. */