Commit a616aec9 authored by Ingo Molnar, committed by Paul E. McKenney

rcu: Fix various typos in comments

Fix ~12 single-word typos in RCU code comments.

[ paulmck: Apply feedback from Randy Dunlap. ]
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent e75bcd48
@@ -777,9 +777,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
 	spin_unlock_irqrestore_rcu_node(sdp, flags);
 
 	/*
-	 * No local callbacks, so probabalistically probe global state.
+	 * No local callbacks, so probabilistically probe global state.
 	 * Exact information would require acquiring locks, which would
-	 * kill scalability, hence the probabalistic nature of the probe.
+	 * kill scalability, hence the probabilistic nature of the probe.
 	 */
 
 	/* First, see if enough time has passed since the last GP. */
...
@@ -94,9 +94,9 @@ static void rcu_sync_func(struct rcu_head *rhp)
 		rcu_sync_call(rsp);
 	} else {
 		/*
-		 * We're at least a GP after the last rcu_sync_exit(); eveybody
+		 * We're at least a GP after the last rcu_sync_exit(); everybody
 		 * will now have observed the write side critical section.
-		 * Let 'em rip!.
+		 * Let 'em rip!
 		 */
 		WRITE_ONCE(rsp->gp_state, GP_IDLE);
 	}
...
@@ -23,7 +23,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
  * @cbs_head: Head of callback list.
  * @cbs_tail: Tail pointer for callback list.
- * @cbs_wq: Wait queue allowning new callback to get kthread's attention.
+ * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
  * @cbs_lock: Lock protecting callback list.
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  * @gp_func: This flavor's grace-period-wait function.
@@ -504,7 +504,7 @@ DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
  * or transition to usermode execution. As such, there are no read-side
  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
  * this primitive is intended to determine that all tasks have passed
- * through a safe state, not so much for data-strcuture synchronization.
+ * through a safe state, not so much for data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
@@ -637,7 +637,7 @@ DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
  * there are no read-side primitives analogous to rcu_read_lock() and
  * rcu_read_unlock() because this primitive is intended to determine
  * that all tasks have passed through a safe state, not so much for
- * data-strcuture synchronization.
+ * data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
@@ -1163,7 +1163,7 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t)
  * there are no read-side primitives analogous to rcu_read_lock() and
  * rcu_read_unlock() because this primitive is intended to determine
  * that all tasks have passed through a safe state, not so much for
- * data-strcuture synchronization.
+ * data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
...
@@ -2489,7 +2489,7 @@ int rcutree_dead_cpu(unsigned int cpu)
 
 /*
  * Invoke any RCU callbacks that have made it to the end of their grace
- * period. Thottle as specified by rdp->blimit.
+ * period. Throttle as specified by rdp->blimit.
  */
 static void rcu_do_batch(struct rcu_data *rdp)
 {
@@ -3848,7 +3848,7 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
  *
  * If a full RCU grace period has elapsed since the earlier call from
  * which oldstate was obtained, return @true, otherwise return @false.
- * If @false is returned, it is the caller's responsibilty to invoke this
+ * If @false is returned, it is the caller's responsibility to invoke this
  * function later on until it does return @true. Alternatively, the caller
  * can explicitly wait for a grace period, for example, by passing @oldstate
  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
@@ -4094,7 +4094,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier);
 /*
  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
  * first CPU in a given leaf rcu_node structure coming online. The caller
- * must hold the corresponding leaf rcu_node ->lock with interrrupts
+ * must hold the corresponding leaf rcu_node ->lock with interrupts
  * disabled.
  */
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
...
@@ -153,7 +153,7 @@ struct rcu_data {
 	unsigned long gp_seq;		/* Track rsp->gp_seq counter. */
 	unsigned long gp_seq_needed;	/* Track furthest future GP request. */
 	union rcu_noqs cpu_no_qs;	/* No QSes yet for this CPU. */
-	bool core_needs_qs;		/* Core waits for quiesc state. */
+	bool core_needs_qs;		/* Core waits for quiescent state. */
 	bool beenonline;		/* CPU online at least once. */
 	bool gpwrap;			/* Possible ->gp_seq wrap. */
 	bool exp_deferred_qs;		/* This CPU awaiting a deferred QS? */
...
@@ -2857,7 +2857,7 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
 	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
 	wassleep = swait_active(&rdp->nocb_gp_wq);
 	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
-		return;  /* Nothing untowards. */
+		return;  /* Nothing untoward. */
 
 	pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
 		"lL"[waslocked],
...
@@ -174,7 +174,7 @@ static inline bool spin_trylock(spinlock_t *lock)
 }
 
 struct completion {
-	/* Hopefuly this won't overflow. */
+	/* Hopefully this won't overflow. */
 	unsigned int count;
 };
...