Commit 43766c3e authored by Paul E. McKenney

rcu-tasks: Make RCU Tasks Trace make use of RCU scheduler hooks

This commit makes the calls to rcu_tasks_qs() detect and report
quiescent states for RCU tasks trace.  If the task is in a quiescent
state and if ->trc_reader_checked is not yet set, the task sets its own
->trc_reader_checked.  This will cause the grace-period kthread to
remove it from the holdout list if it still remains there.

[ paulmck: Fix conditional compilation per kbuild test robot feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent af051ca4
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -131,20 +131,50 @@ static inline void rcu_init_nohz(void) { }
  * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU_GENERIC
-#define rcu_tasks_qs(t) \
-	do { \
-		if (READ_ONCE((t)->rcu_tasks_holdout)) \
-			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
+
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+	do { \
+		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+
+# ifdef CONFIG_TASKS_RCU_TRACE
+# define rcu_tasks_trace_qs(t) \
+	do { \
+		if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
+		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
+			smp_store_release(&(t)->trc_reader_checked, true); \
+			smp_mb(); /* Readers partitioned by store. */ \
+		} \
+	} while (0)
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+
+#define rcu_tasks_qs(t, preempt) \
+do { \
+	rcu_tasks_classic_qs((t), (preempt)); \
+	rcu_tasks_trace_qs((t)); \
+} while (0)
+
+# ifdef CONFIG_TASKS_RUDE_RCU
 void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks_rude(void);
+# endif
+
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
-#define rcu_tasks_qs(t) do { } while (0)
+#define rcu_tasks_qs(t, preempt) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) do { } while (0)
 #define call_rcu_tasks call_rcu
 #define synchronize_rcu_tasks synchronize_rcu
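
For reference, with both CONFIG_TASKS_RCU and CONFIG_TASKS_RCU_TRACE enabled, rcu_tasks_qs(t, preempt) now expands to roughly the following (do/while wrappers elided):

	/* Classic RCU Tasks: quiescent only on a voluntary context switch. */
	if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout))
		WRITE_ONCE((t)->rcu_tasks_holdout, false);

	/* RCU Tasks Trace: a task not in a read-side critical section
	 * marks itself checked, so the grace-period kthread can remove
	 * it from the holdout list without further prodding. */
	if (!likely(READ_ONCE((t)->trc_reader_checked)) &&
	    !unlikely(READ_ONCE((t)->trc_reader_nesting))) {
		smp_store_release(&(t)->trc_reader_checked, true);
		smp_mb(); /* Readers partitioned by store. */
	}
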
@@ -161,7 +191,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_tasks_qs(current); \
+	rcu_tasks_qs(current, false); \
 	cond_resched(); \
 } while (0)
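
Note that cond_resched_tasks_rcu_qs() hard-codes preempt to false: cond_resched() is by definition a voluntary context switch, so the classic RCU Tasks quiescent state is still reported from this path.
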
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -49,7 +49,7 @@ static inline void rcu_softirq_qs(void)
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_qs(); \
-		rcu_tasks_qs(current); \
+		rcu_tasks_qs(current, (preempt)); \
 	} while (0)
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -180,7 +180,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		/* Pick up any new callbacks. */
 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
-		smp_mb__after_unlock_lock(); // Order updates vs. GP.
+		smp_mb__after_spinlock(); // Order updates vs. GP.
 		list = rtp->cbs_head;
 		rtp->cbs_head = NULL;
 		rtp->cbs_tail = &rtp->cbs_head;
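
The barrier swap matters here: smp_mb__after_unlock_lock() guarantees full ordering only when it follows an unlock+lock pair, while smp_mb__after_spinlock() provides full ordering after the plain raw_spin_lock_irqsave() acquisition above, which is what this code path actually has.
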
@@ -874,7 +874,7 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
 					  struct list_head *hop)
 {
 	WRITE_ONCE(t->trc_reader_need_end, false);
-	t->trc_reader_checked = false;
+	WRITE_ONCE(t->trc_reader_checked, false);
 	t->trc_ipi_to_cpu = -1;
 	trc_wait_for_one_reader(t, hop);
 }
@@ -983,6 +983,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 			pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
 	}
 	smp_mb(); // Caller's code must be ordered after wakeup.
+		  // Pairs with pretty much every ordering primitive.
 }
 
 /* Report any needed quiescent state for this exiting task. */
...@@ -331,8 +331,7 @@ void rcu_note_context_switch(bool preempt) ...@@ -331,8 +331,7 @@ void rcu_note_context_switch(bool preempt)
rcu_qs(); rcu_qs();
if (rdp->exp_deferred_qs) if (rdp->exp_deferred_qs)
rcu_report_exp_rdp(rdp); rcu_report_exp_rdp(rdp);
if (!preempt) rcu_tasks_qs(current, preempt);
rcu_tasks_qs(current);
trace_rcu_utilization(TPS("End context switch")); trace_rcu_utilization(TPS("End context switch"));
} }
EXPORT_SYMBOL_GPL(rcu_note_context_switch); EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -841,8 +840,7 @@ void rcu_note_context_switch(bool preempt)
 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
-	if (!preempt)
-		rcu_tasks_qs(current);
+	rcu_tasks_qs(current, preempt);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 }