Commit da1df50d authored by Paul E. McKenney

rcu: Remove rcu_state structure's ->rda field

The rcu_state structure's ->rda field was used to find the per-CPU
rcu_data structures corresponding to that rcu_state structure.  But now
there is only one rcu_state structure (creatively named "rcu_state")
and one set of per-CPU rcu_data structures (creatively named "rcu_data").
Therefore, uses of the ->rda field can always be replaced by "rcu_data",
and this commit makes that change and removes the ->rda field.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent ec5dd444
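The pattern this commit removes is easiest to see in miniature: code used to reach the per-CPU rcu_data structures through the rcu_state structure's ->rda pointer, and now names the single static per-CPU set directly. Below is a minimal userspace sketch of the before/after access styles; NR_CPUS, toy_data, toy_state_old, and per_cpu_ptr_toy are illustrative stand-ins for the kernel's per-CPU machinery, not kernel APIs.

/* Toy model (not kernel code): all names here are made up. */
#include <stdio.h>

#define NR_CPUS 4

struct toy_data {
	int ticks_this_gp;
};

/* One instance per CPU, standing in for the static per-CPU rcu_data. */
static struct toy_data toy_data[NR_CPUS];

/* Before: the state structure carried a pointer to its per-CPU set. */
struct toy_state_old {
	struct toy_data *rda;		/* points at toy_data[] */
};

/* Stand-in for per_cpu_ptr(): index a per-CPU set by CPU number. */
static struct toy_data *per_cpu_ptr_toy(struct toy_data *base, int cpu)
{
	return &base[cpu];
}

int main(void)
{
	struct toy_state_old rsp = { .rda = toy_data };

	/* Old style: indirect through rsp->rda. */
	per_cpu_ptr_toy(rsp.rda, 1)->ticks_this_gp++;

	/* New style: name the single per-CPU set directly, as the diff does. */
	per_cpu_ptr_toy(toy_data, 1)->ticks_this_gp++;

	printf("cpu1 ticks: %d\n", toy_data[1].ticks_this_gp);
	return 0;
}

With only one rcu_state left, the extra indirection buys nothing, which is why every per_cpu_ptr(rsp->rda, cpu) in the diff below becomes per_cpu_ptr(&rcu_data, cpu).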
@@ -312,7 +312,6 @@ struct rcu_state {
 	struct rcu_node *level[RCU_NUM_LVLS + 1];
 						/* Hierarchy levels (+1 to */
 						/* shut bogus gcc warning) */
-	struct rcu_data __percpu *rda;		/* pointer of percu rcu_data. */
 	int ncpus;				/* # CPUs seen so far. */
 	/* The following fields are guarded by the root rcu_node's lock. */
@@ -286,7 +286,7 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
  */
 static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 {
-	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 	struct rcu_node *rnp = rdp->mynode;
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
@@ -361,7 +361,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	mask_ofl_test = 0;
 	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
-		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 		struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
 		int snap;
@@ -390,7 +390,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 	/* IPI the remaining CPUs for expedited quiescent state. */
 	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
-		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (!(mask_ofl_ipi & mask))
 			continue;
@@ -509,7 +509,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			if (!(rnp->expmask & mask))
 				continue;
 			ndetected++;
-			rdp = per_cpu_ptr(rsp->rda, cpu);
+			rdp = per_cpu_ptr(&rcu_data, cpu);
 			pr_cont(" %d-%c%c%c", cpu,
 				"O."[!!cpu_online(cpu)],
 				"o."[!!(rdp->grpmask & rnp->expmaskinit)],
@@ -642,7 +642,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	}
 	/* Wait for expedited grace period to complete. */
-	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
 	rnp = rcu_get_root(rsp);
 	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 		   sync_exp_work_done(rsp, s));
@@ -665,7 +665,7 @@ static void sync_rcu_exp_handler(void *info)
 {
 	unsigned long flags;
 	struct rcu_state *rsp = info;
-	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 	struct task_struct *t = current;
@@ -772,13 +772,12 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *data)
+static void sync_sched_exp_handler(void *unused)
 {
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
-	struct rcu_state *rsp = data;
-	rdp = this_cpu_ptr(rsp->rda);
+	rdp = this_cpu_ptr(&rcu_data);
 	rnp = rdp->mynode;
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
@@ -801,7 +800,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_state;
-	rdp = per_cpu_ptr(rsp->rda, cpu);
+	rdp = per_cpu_ptr(&rcu_data, cpu);
 	rnp = rdp->mynode;
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
 		return;
@@ -328,7 +328,7 @@ static void rcu_qs(void)
 void rcu_note_context_switch(bool preempt)
 {
 	struct task_struct *t = current;
-	struct rcu_data *rdp = this_cpu_ptr(rcu_state_p->rda);
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp;
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
@@ -488,7 +488,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	 * t->rcu_read_unlock_special cannot change.
 	 */
 	special = t->rcu_read_unlock_special;
-	rdp = this_cpu_ptr(rcu_state_p->rda);
+	rdp = this_cpu_ptr(&rcu_data);
 	if (!special.s && !rdp->deferred_qs) {
 		local_irq_restore(flags);
 		return;
@@ -911,7 +911,7 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
 	}
 	pr_cont("\n");
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rdp = per_cpu_ptr(&rcu_data, cpu);
 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
 		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
 			cpu, ".o"[onl],
@@ -1437,7 +1437,7 @@ static void __init rcu_spawn_boost_kthreads(void)
 static void rcu_prepare_kthreads(int cpu)
 {
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
@@ -1574,7 +1574,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 	rdtp->last_advance_all = jiffies;
 	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(rsp->rda);
+		rdp = this_cpu_ptr(&rcu_data);
 		rnp = rdp->mynode;
 		/*
@@ -1692,7 +1692,7 @@ static void rcu_prepare_for_idle(void)
 		return;
 	rdtp->last_accelerate = jiffies;
 	for_each_rcu_flavor(rsp) {
-		rdp = this_cpu_ptr(rsp->rda);
+		rdp = this_cpu_ptr(&rcu_data);
 		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
 			continue;
 		rnp = rdp->mynode;
@@ -1778,7 +1778,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 {
 	unsigned long delta;
 	char fast_no_hz[72];
-	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_dynticks *rdtp = rdp->dynticks;
 	char *ticks_title;
 	unsigned long ticks_value;
@@ -1833,7 +1833,7 @@ static void increment_cpu_stall_ticks(void)
 	struct rcu_state *rsp;
 	for_each_rcu_flavor(rsp)
-		raw_cpu_inc(rsp->rda->ticks_this_gp);
+		raw_cpu_inc(rcu_data.ticks_this_gp);
 }
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -1965,7 +1965,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
  */
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 {
-	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	unsigned long ret;
 #ifdef CONFIG_PROVE_RCU
 	struct rcu_head *rhp;
@@ -2426,7 +2426,7 @@ void __init rcu_init_nohz(void)
 	for_each_rcu_flavor(rsp) {
 		for_each_cpu(cpu, rcu_nocb_mask)
-			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
+			init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
 		rcu_organize_nocb_kthreads(rsp);
 	}
 }
@@ -2452,7 +2452,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 	struct rcu_data *rdp;
 	struct rcu_data *rdp_last;
 	struct rcu_data *rdp_old_leader;
-	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+	struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu);
 	struct task_struct *t;
 	/*
@@ -2545,7 +2545,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
 	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
 	 */
 	for_each_cpu(cpu, rcu_nocb_mask) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rdp->cpu >= nl) {
 			/* New leader, set up for followers & next leader. */
 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
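Two accessors recur throughout the hunks above: per_cpu_ptr(&rcu_data, cpu) selects a named CPU's instance, while this_cpu_ptr(&rcu_data) selects the instance of whatever CPU the code is currently running on. Below is a minimal userspace sketch of that distinction; sched_getcpu() (glibc, Linux-specific) stands in for the kernel's notion of the current CPU, and toy_per_cpu_ptr/toy_this_cpu_ptr are made-up names, not the kernel accessors.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NR_CPUS 4

struct toy_data {
	int qs_pending;
};

static struct toy_data toy_data[NR_CPUS];

/* Stand-in for per_cpu_ptr(): the caller names the CPU explicitly. */
static struct toy_data *toy_per_cpu_ptr(int cpu)
{
	return &toy_data[cpu];
}

/* Stand-in for this_cpu_ptr(): implicitly the CPU we are executing on. */
static struct toy_data *toy_this_cpu_ptr(void)
{
	int cpu = sched_getcpu();	/* -1 on error; fall back to CPU 0 */

	return &toy_data[cpu < 0 ? 0 : cpu % NR_CPUS];
}

int main(void)
{
	int cpu;

	toy_per_cpu_ptr(2)->qs_pending = 1;	/* some other CPU's slot */
	toy_this_cpu_ptr()->qs_pending = 1;	/* our own CPU's slot */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %d\n", cpu, toy_data[cpu].qs_pending);
	return 0;
}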