Commit f7f7bac9 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

rcu: Have the RCU tracepoints use the tracepoint_string infrastructure

Currently, RCU tracepoints save only a pointer to a string in the
ring buffer. When the events are displayed via the
/sys/kernel/debug/tracing/trace file, they are formatted with a printf
"%s" that takes the address stored in the ring buffer and prints out
the string it points to. This requires that the strings be constant
and persistent in the kernel.
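
For reference, the RCU tracepoints in include/trace/events/rcu.h follow
roughly this pattern; this is a simplified sketch from memory rather than
a verbatim copy of the header:

TRACE_EVENT(rcu_utilization,

	TP_PROTO(const char *s),

	TP_ARGS(s),

	TP_STRUCT__entry(
		__field(const char *, s)	/* only the pointer is recorded */
	),

	TP_fast_assign(
		__entry->s = s;
	),

	/* the pointer is dereferenced only when the trace file is read */
	TP_printk("%s", __entry->s)
);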

This is a problem for tools like trace-cmd and perf, which read the
binary data from the buffers but have no access to kernel memory, and
so cannot find out what string is represented by an address in the
buffer.

By using the tracepoint_string infrastructure, the RCU tracepoint strings
can be exported such that userspace tools can map the addresses to
the strings.
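
The TPS() wrapper seen in the hunks below is, as far as the patch shows,
just a short alias for tracepoint_string(), which places the literal in a
dedicated __tracepoint_str section so that it gets listed in
printk_formats. Roughly (a sketch of the mechanism, not the exact header
text):

#define __tracepoint_string	__attribute__((section("__tracepoint_str")))

/* Export the string and hand back its (persistent) address. */
#define tracepoint_string(str)						\
	({								\
		static const char *___tp_str __tracepoint_string = str; \
		___tp_str;						\
	})

/* Shorthand used throughout the RCU code below. */
#define TPS(x)	tracepoint_string(x)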

 # cat /sys/kernel/debug/tracing/printk_formats
0xffffffff81a4a0e8 : "rcu_preempt"
0xffffffff81a4a0f4 : "rcu_bh"
0xffffffff81a4a100 : "rcu_sched"
0xffffffff818437a0 : "cpuqs"
0xffffffff818437a6 : "rcu_sched"
0xffffffff818437a0 : "cpuqs"
0xffffffff818437b0 : "rcu_bh"
0xffffffff818437b7 : "Start context switch"
0xffffffff818437cc : "End context switch"
0xffffffff818437a0 : "cpuqs"
[...]
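
A hypothetical sketch (not trace-cmd's or perf's actual code) of how a
userspace tool could turn this file into an address-to-string map; the
path and parsing format are assumptions based on the listing above:

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/sys/kernel/debug/tracing/printk_formats", "r");
	char line[1024];

	if (!fp) {
		perror("printk_formats");
		return 1;
	}
	/* Each line looks like: 0xffffffff81a4a0e8 : "rcu_preempt" */
	while (fgets(line, sizeof(line), fp)) {
		unsigned long long addr;
		char str[900];

		if (sscanf(line, "0x%llx : \"%899[^\"]\"", &addr, str) == 2)
			printf("%#llx -> %s\n", addr, str);
	}
	fclose(fp);
	return 0;
}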

Now userspace tools can display:

 rcu_utilization:      Start context switch
 rcu_dyntick:          Start 1 0
 rcu_utilization:      End context switch
 rcu_batch_start:      rcu_preempt CBs=0/5 bl=10
 rcu_dyntick:          End 0 140000000000000
 rcu_invoke_callback:  rcu_preempt rhp=0xffff880071c0d600 func=proc_i_callback
 rcu_invoke_callback:  rcu_preempt rhp=0xffff880077b5b230 func=__d_free
 rcu_dyntick:          Start 140000000000000 0
 rcu_invoke_callback:  rcu_preempt rhp=0xffff880077563980 func=file_free_rcu
 rcu_batch_end:        rcu_preempt CBs-invoked=3 idle=>c<>c<>c<>c<
 rcu_utilization:      End RCU core
 rcu_grace_period:     rcu_preempt 9741 start
 rcu_dyntick:          Start 1 0
 rcu_dyntick:          End 0 140000000000000
 rcu_dyntick:          Start 140000000000000 0

Instead of:

 rcu_utilization:      ffffffff81843110
 rcu_future_grace_period: ffffffff81842f1d 9939 9939 9940 0 0 3 ffffffff81842f32
 rcu_batch_start:      ffffffff81842f1d CBs=0/4 bl=10
 rcu_future_grace_period: ffffffff81842f1d 9939 9939 9940 0 0 3 ffffffff81842f3c
 rcu_grace_period:     ffffffff81842f1d 9939 ffffffff81842f80
 rcu_invoke_callback:  ffffffff81842f1d rhp=0xffff88007888aac0 func=file_free_rcu
 rcu_grace_period:     ffffffff81842f1d 9939 ffffffff81842f95
 rcu_invoke_callback:  ffffffff81842f1d rhp=0xffff88006aeb4600 func=proc_i_callback
 rcu_future_grace_period: ffffffff81842f1d 9939 9939 9940 0 0 3 ffffffff81842f32
 rcu_future_grace_period: ffffffff81842f1d 9939 9939 9940 0 0 3 ffffffff81842f3c
 rcu_invoke_callback:  ffffffff81842f1d rhp=0xffff880071cb9fc0 func=__d_free
 rcu_grace_period:     ffffffff81842f1d 9939 ffffffff81842f80
 rcu_invoke_callback:  ffffffff81842f1d rhp=0xffff88007888ae80 func=file_free_rcu
 rcu_batch_end:        ffffffff81842f1d CBs-invoked=4 idle=>c<>c<>c<>c<
 rcu_utilization:      ffffffff8184311f
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent a41bfeb2
@@ -167,7 +167,7 @@ static void rcu_preempt_qs(int cpu)
 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
 	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
 	rdp->passed_quiesce = 1;
 	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
@@ -386,7 +386,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		np = rcu_next_node_entry(t, rnp);
 		list_del_init(&t->rcu_node_entry);
 		t->rcu_blocked_node = NULL;
-		trace_rcu_unlock_preempted_task("rcu_preempt",
+		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
 		if (&t->rcu_node_entry == rnp->gp_tasks)
 			rnp->gp_tasks = np;
@@ -410,7 +410,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		 */
 		empty_exp_now = !rcu_preempted_readers_exp(rnp);
 		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
-			trace_rcu_quiescent_state_report("preempt_rcu",
+			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
@@ -1248,12 +1248,12 @@ static int rcu_boost_kthread(void *arg)
 	int spincnt = 0;
 	int more2boost;
 
-	trace_rcu_utilization("Start boost kthread@init");
+	trace_rcu_utilization(TPS("Start boost kthread@init"));
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End boost kthread@rcu_wait");
+		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
-		trace_rcu_utilization("Start boost kthread@rcu_wait");
+		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1262,14 +1262,14 @@ static int rcu_boost_kthread(void *arg)
 			spincnt = 0;
 		if (spincnt > 10) {
 			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End boost kthread@rcu_yield");
+			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
 			schedule_timeout_interruptible(2);
-			trace_rcu_utilization("Start boost kthread@rcu_yield");
+			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
 			spincnt = 0;
 		}
 	}
 	/* NOTREACHED */
-	trace_rcu_utilization("End boost kthread@notreached");
+	trace_rcu_utilization(TPS("End boost kthread@notreached"));
 	return 0;
 }
@@ -1417,7 +1417,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
 	int spincnt;
 
 	for (spincnt = 0; spincnt < 10; spincnt++) {
-		trace_rcu_utilization("Start CPU kthread@rcu_wait");
+		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
 		local_bh_disable();
 		*statusp = RCU_KTHREAD_RUNNING;
 		this_cpu_inc(rcu_cpu_kthread_loops);
@@ -1429,15 +1429,15 @@ static void rcu_cpu_kthread(unsigned int cpu)
 			rcu_kthread_do_work();
 		local_bh_enable();
 		if (*workp == 0) {
-			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
 			*statusp = RCU_KTHREAD_WAITING;
 			return;
 		}
 	}
 	*statusp = RCU_KTHREAD_YIELDING;
-	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
 	schedule_timeout_interruptible(2);
-	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
 	*statusp = RCU_KTHREAD_WAITING;
 }
@@ -2200,7 +2200,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 * Wait for the grace period.  Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
-	trace_rcu_future_gp(rnp, rdp, c, "StartWait");
+	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
 		wait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
@@ -2208,9 +2208,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (likely(d))
 			break;
 		flush_signals(current);
-		trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
+		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
 	}
-	trace_rcu_future_gp(rnp, rdp, c, "EndWait");
+	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }