Commit 6f56f714 authored by Paul E. McKenney

rcu: Improve RCU-tasks naming and comments

The naming and comments associated with some RCU-tasks code make
the faulty assumption that context switches due to cond_resched()
are voluntary.  As several people pointed out, this is not the case.
This commit therefore updates function names and comments to better
reflect current reality.
Reported-by: Byungchul Park <byungchul.park@lge.com>
Reported-by: Joel Fernandes <joel@joelfernandes.org>
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent a7538352
@@ -158,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
 	} while (0)
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -170,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		rcu_note_voluntary_context_switch_lite(t); \
+		rcu_tasks_qs(t); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -194,7 +194,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_note_voluntary_context_switch_lite(current); \
+	rcu_tasks_qs(current); \
 	cond_resched(); \
 } while (0)
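For orientation, a minimal sketch of how a long-running kernel loop would report Tasks-RCU quiescent states through the renamed helper; the loop, struct walk_item, and do_work_on() are invented for illustration and are not part of this commit:

/* Hypothetical caller, for illustration only; not part of this commit. */
#include <linux/rcupdate.h>

struct walk_item {
	int payload;
};

static void do_work_on(struct walk_item *p)
{
	p->payload++;		/* stand-in for real per-item work */
}

static void walk_items(struct walk_item *items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		do_work_on(&items[i]);
		/*
		 * Report a Tasks-RCU quiescent state (this expands to
		 * rcu_tasks_qs(current), formerly
		 * rcu_note_voluntary_context_switch_lite(current)) and
		 * give the scheduler a chance to run something else.
		 */
		cond_resched_tasks_rcu_qs();
	}
}

Note that rcu_tasks_qs() reads ->rcu_tasks_holdout before writing it, so the common case, in which no Tasks-RCU grace period is waiting on this task, never dirties that flag's cache line.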
...
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_sched_qs(); \
-		rcu_note_voluntary_context_switch_lite(current); \
+		rcu_tasks_qs(current); \
 	} while (0)
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
...
@@ -457,7 +457,7 @@ void rcu_note_context_switch(bool preempt)
 		rcu_momentary_dyntick_idle();
 	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
 	if (!preempt)
-		rcu_note_voluntary_context_switch_lite(current);
+		rcu_tasks_qs(current);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
...
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle.  As such, grace periods can take one good
- * long time.  There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like.  Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time.  There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs.  If this is required, per-CPU callback lists
+ * will be needed.
  */
 /* Global list of callbacks and associated lock. */
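As a concrete illustration of the update side described above, a hedged sketch of a tracing-style user that waits for every task to pass through such a safe state before freeing a trampoline; struct my_trampoline, my_unpatch_call_sites(), and my_trampoline_teardown() are made-up names, not part of this commit:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_trampoline {
	char insns[64];		/* stand-in for generated instructions */
};

static void my_unpatch_call_sites(struct my_trampoline *tr)
{
	/* Redirect all callers away from tr->insns (details elided). */
}

static void my_trampoline_teardown(struct my_trampoline *tr)
{
	my_unpatch_call_sites(tr);

	/*
	 * Wait until every task has done a voluntary context switch,
	 * gone idle, or run in userspace, so no task can still be
	 * executing in tr->insns.
	 */
	synchronize_rcu_tasks();

	kfree(tr);		/* now safe to free */
}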
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed.  call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-strcuture synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
...
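The asynchronous form documented in this hunk would look roughly like the following, again with made-up names (struct my_tramp, my_tramp_free_cb(), my_tramp_teardown_async()) rather than anything from this patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_tramp {
	struct rcu_head rh;
	char insns[64];		/* stand-in for generated instructions */
};

/* Runs after a Tasks-RCU grace period; no task can still be in ->insns. */
static void my_tramp_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_tramp, rh));
}

static void my_tramp_teardown_async(struct my_tramp *tr)
{
	/* Callers are assumed to have been redirected away already. */
	call_rcu_tasks(&tr->rh, my_tramp_free_cb);
}

Per the comment above, call_rcu_tasks() is not intended for high-rate use from many CPUs, so a caller like this sketch should only queue such callbacks occasionally.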