Commit 67c583a7 authored by Boqun Feng, committed by Paul E. McKenney

RCU: Privatize rcu_node::lock

In the patch:

"rcu: Add transitivity to remaining rcu_node ->lock acquisitions"

all locking operations on rcu_node::lock were replaced with wrappers
because of the need for transitivity, which indicates that we should never
write code using LOCK primitives alone (i.e., without a proper barrier
following them) on rcu_node::lock outside those wrappers. We can detect
this kind of misuse of rcu_node::lock in the future by adding the __private
modifier to rcu_node::lock.

To privatize rcu_node::lock, unlock wrappers are also needed. Replacing
spinlock unlocks with these wrappers not only privatizes rcu_node::lock
but also makes it easier to identify the critical sections protected by
rcu_node::lock.

This patch adds the __private modifier to rcu_node::lock and wraps every
access to it in ACCESS_PRIVATE(). In addition, unlock wrappers are added,
and raw_spin_unlock(&rnp->lock) and friends are replaced with those
wrappers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent ad315455
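
For context, the __private annotation and the ACCESS_PRIVATE() accessor that the
diff below relies on live in include/linux/compiler.h; roughly (a sketch of the
mechanism, not copied verbatim from this tree):

/* Sketch of the __private/ACCESS_PRIVATE machinery (approximate). */
#ifdef __CHECKER__
/* Under sparse, mark the field so that direct accesses are flagged. */
# define __private	__attribute__((noderef))
/* Cast the attribute away so the wrappers themselves can touch the field. */
# define ACCESS_PRIVATE(p, member) \
	(*((typeof((p)->member) __force *)&(p)->member))
#else
/* No-ops for normal builds. */
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif

With this in place, a sparse run (make C=1) should warn about code that touches
rnp->lock directly instead of going through the _rcu_node wrappers.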
@@ -149,8 +149,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
-				/*  rcu_state fields as well as following. */
+	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
+					/*  some rcu_state fields as well as */
+					/*  following. */
 	unsigned long gpnum;	/* Current grace period for this node. */
 				/*  This will either be equal to or one */
 				/*  behind the root rcu_node's gpnum. */
@@ -680,7 +681,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -689,29 +690,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * As ->lock of struct rcu_node is a __private field, therefore one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock(&rnp->lock);
+	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-	raw_spin_lock_irq(&rnp->lock);
+	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
 	smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
 #define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
 do {									\
 	typecheck(unsigned long, flags);				\
-	raw_spin_lock_irqsave(&(rnp)->lock, flags);			\
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
 	smp_mb__after_unlock_lock();					\
 } while (0)
 
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
+do {									\
+	typecheck(unsigned long, flags);				\
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
+} while (0)
+
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-	bool locked = raw_spin_trylock(&rnp->lock);
+	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
 	if (locked)
 		smp_mb__after_unlock_lock();
...
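
To make the ordering comment above concrete, here is an illustrative sketch
(not part of this patch) of the walk-up-the-tree pattern these wrappers serve,
loosely modeled on the unlock-child/lock-parent handoff in rcu_report_qs_rnp():
because each raw_spin_lock_rcu_node() includes smp_mb__after_unlock_lock(), the
UNLOCK of one level followed by the LOCK of the next acts as a full barrier.

/*
 * Illustrative sketch only -- not the kernel's actual function.
 * Walk from a leaf rcu_node toward the root, handing the lock off at each
 * level.  Interrupts were disabled by the initial irqsave acquisition.
 */
static void example_walk_up_rcu_node(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct rcu_node *rnp_c;

	for (;;) {
		if (rnp->parent == NULL) {
			/* Reached the root: release and restore irqs. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_unlock_rcu_node(rnp_c);	/* irqs remain disabled. */
		/* Acquire includes smp_mb__after_unlock_lock(): full ordering. */
		raw_spin_lock_rcu_node(rnp);
	}
}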
@@ -235,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP. This expedited
@@ -489,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 						 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
 		} else {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
 		/* Unboost if we were boosted. */
@@ -518,14 +518,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 		sched_show_task(t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -990,7 +990,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 * might exit their RCU read-side critical sections on their own.
 	 */
 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return 0;
 	}
 
@@ -1027,7 +1027,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
@@ -1087,7 +1087,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1097,13 +1097,13 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	    ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		t = rnp->boost_kthread_task;
 		if (t)
 			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1171,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1307,7 +1307,7 @@ static void rcu_prepare_kthreads(int cpu)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void invoke_rcu_callbacks_kthread(void)
@@ -1558,7 +1558,7 @@ static void rcu_prepare_for_idle(void)
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	}
@@ -2058,7 +2058,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	needwake = rcu_start_future_gp(rnp, rdp, &c);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
...