Commit cc99a310 authored by Paul E. McKenney

rcu: Move rcu_report_unblock_qs_rnp() to common code

The rcu_report_unblock_qs_rnp() function is invoked when the
last task blocking the current grace period exits its outermost
RCU read-side critical section.  Previously, this was called only
from rcu_read_unlock_special(), and was therefore defined only when
CONFIG_RCU_PREEMPT=y.  However, this function will be invoked even when
CONFIG_RCU_PREEMPT=n once CPU-hotplug operations are processed only at
the beginnings of RCU grace periods.  The reason for this change is that
the last task on a given leaf rcu_node structure's ->blkd_tasks list
might well exit its RCU read-side critical section between the time that
recent CPU-hotplug operations were applied and when the new grace period
was initialized.  This situation could result in RCU waiting forever on
that leaf rcu_node structure, because if all that structure's CPUs were
already offline, there would be no quiescent-state events to drive that
structure's part of the grace period.

This commit therefore moves rcu_report_unblock_qs_rnp() to common code
that is built unconditionally so that the quiescent-state-forcing code
can clean up after this situation, avoiding the grace-period stall.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 8eb74b2b
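
Before reading the diff, it may help to see the shape of the reporting walk in isolation. The following userspace sketch is a minimal model of what rcu_report_unblock_qs_rnp() does once the last blocked reader leaves a node: it is not kernel code, all identifiers (struct node, report_unblock_qs(), and so on) are invented for illustration, and the real locking and memory ordering are deliberately omitted.

#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *parent;
        unsigned long qsmask;    /* bits for CPUs/children still owing a QS */
        unsigned long grpmask;   /* this node's bit in parent->qsmask */
        bool blocked_readers;    /* readers preempted within a critical section */
};

static bool gp_in_progress = true;

/* Clear @mask in @np and keep walking up while whole levels empty out. */
static void report_qs_up(struct node *np, unsigned long mask)
{
        while (np) {
                np->qsmask &= ~mask;
                if (np->qsmask != 0 || np->blocked_readers)
                        return;                   /* this level still blocks the GP */
                if (!np->parent) {
                        gp_in_progress = false;   /* root emptied: GP may end */
                        return;
                }
                mask = np->grpmask;
                np = np->parent;
        }
}

/* Toy analogue of rcu_report_unblock_qs_rnp(): last blocked reader left @np. */
static void report_unblock_qs(struct node *np)
{
        np->blocked_readers = false;
        if (np->qsmask != 0)
                return;                  /* still need more quiescent states */
        if (!np->parent) {
                gp_in_progress = false;  /* single-node tree: GP may end */
                return;
        }
        report_qs_up(np->parent, np->grpmask);
}

int main(void)
{
        struct node root = { .qsmask = 0x1 };
        struct node leaf = { .parent = &root, .grpmask = 0x1,
                             .blocked_readers = true };

        /* All of the leaf's CPUs have reported; one preempted reader remains. */
        report_unblock_qs(&leaf);
        printf("grace period %s\n", gp_in_progress ? "still blocked" : "complete");
        return 0;
}

Note that the walk itself is flavor-agnostic: the rsp argument this commit adds merely tells the one shared copy of the function which rcu_state's node tree it is reporting into, which is what allows the function to move out of CONFIG_RCU_PREEMPT=y-only code.
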
...
@@ -2126,6 +2126,45 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
 }
 
+/*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period.  The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void __maybe_unused rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                                       struct rcu_node *rnp, unsigned long flags)
+        __releases(rnp->lock)
+{
+        unsigned long mask;
+        struct rcu_node *rnp_p;
+
+        WARN_ON_ONCE(rsp == &rcu_bh_state || rsp == &rcu_sched_state);
+        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                return;  /* Still need more quiescent states! */
+        }
+
+        rnp_p = rnp->parent;
+        if (rnp_p == NULL) {
+                /*
+                 * Either there is only one rcu_node in the tree,
+                 * or tasks were kicked up to root rcu_node due to
+                 * CPUs going offline.
+                 */
+                rcu_report_qs_rsp(rsp, flags);
+                return;
+        }
+
+        /* Report up the rest of the hierarchy. */
+        mask = rnp->grpmask;
+        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
+        smp_mb__after_unlock_lock();
+        rcu_report_qs_rnp(mask, rsp, rnp_p, flags);
+}
+
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
  * structure.  This must be either called from the specified CPU, or
...
...
@@ -232,43 +232,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
         return rnp->gp_tasks != NULL;
 }
 
-/*
- * Record a quiescent state for all tasks that were previously queued
- * on the specified rcu_node structure and that were blocking the current
- * RCU grace period.  The caller must hold the specified rnp->lock with
- * irqs disabled, and this lock is released upon return, but irqs remain
- * disabled.
- */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-        __releases(rnp->lock)
-{
-        unsigned long mask;
-        struct rcu_node *rnp_p;
-
-        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                return;  /* Still need more quiescent states! */
-        }
-
-        rnp_p = rnp->parent;
-        if (rnp_p == NULL) {
-                /*
-                 * Either there is only one rcu_node in the tree,
-                 * or tasks were kicked up to root rcu_node due to
-                 * CPUs going offline.
-                 */
-                rcu_report_qs_rsp(&rcu_preempt_state, flags);
-                return;
-        }
-
-        /* Report up the rest of the hierarchy. */
-        mask = rnp->grpmask;
-        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
-        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
-        smp_mb__after_unlock_lock();
-        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
-}
-
 /*
  * Advance a ->blkd_tasks-list pointer to the next entry, instead
  * returning NULL if at the end of the list.
...
@@ -399,7 +362,8 @@ void rcu_read_unlock_special(struct task_struct *t)
                                                  rnp->grplo,
                                                  rnp->grphi,
                                                  !!rnp->gp_tasks);
-                        rcu_report_unblock_qs_rnp(rnp, flags);
+                        rcu_report_unblock_qs_rnp(&rcu_preempt_state,
+                                                  rnp, flags);
                 } else {
                         raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 }
...