Commit d2347763 authored by Namhyung Kim, committed by Arnaldo Carvalho de Melo

perf offcpu: Track child processes

When the -p option is used or a workload is given, perf needs to handle
child processes.  The perf_event itself can inherit those task events
automatically.  We can add a new BPF program on the task_newtask
tracepoint to track child processes on the BPF side as well.
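
For context, the new program relies on the task filter hash map that
off_cpu.bpf.c already defines.  A minimal sketch of such a map is shown
here; the key/value sizes are assumptions based on how on_newtask uses it
(u32 tgid key, u8 flag value), not copied from this patch:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  #define MAX_ENTRIES  102400

  /* sketch: hash map keyed by tgid, holding a one-byte presence flag */
  struct {
          __uint(type, BPF_MAP_TYPE_HASH);
          __uint(key_size, sizeof(__u32));
          __uint(value_size, sizeof(__u8));
          __uint(max_entries, MAX_ENTRIES);
  } task_filter SEC(".maps");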

Before:
  $ sudo perf record --off-cpu -- perf bench sched messaging
  $ sudo perf report --stat | grep -A1 offcpu
  offcpu-time stats:
            SAMPLE events:        1

After:
  $ sudo perf record -a --off-cpu -- perf bench sched messaging
  $ sudo perf report --stat | grep -A1 offcpu
  offcpu-time stats:
            SAMPLE events:      856

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Blake Jones <blakejones@google.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20220811185456.194721-4-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent d6f415ca

@@ -17,6 +17,7 @@
#include "bpf_skel/off_cpu.skel.h"
#define MAX_STACKS 32
#define MAX_PROC 4096
/* we don't need actual timestamp, just want to put the samples at last */
#define OFF_CPU_TIMESTAMP (~0ull << 32)

@@ -164,10 +165,16 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
			ntasks++;
		}

		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
	}

	if (evlist__first(evlist)->cgrp) {
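
A note on the sizing above: libbpf only allows changing a map's
max_entries before the BPF object is loaded, and the MAX_PROC floor
presumably leaves headroom for child tasks that the new on_newtask
program adds at runtime.  A rough, standalone sketch of the same pattern
using the generic libbpf object API (the function name is made up for
illustration):

  #include <errno.h>
  #include <bpf/libbpf.h>

  #define MAX_PROC  4096

  /* sketch: grow the task filter before load so it can also hold
   * tasks that are added later at runtime */
  static int size_task_filter(struct bpf_object *obj, int ntasks)
  {
          struct bpf_map *map;

          map = bpf_object__find_map_by_name(obj, "task_filter");
          if (!map)
                  return -ENOENT;

          if (ntasks < MAX_PROC)
                  ntasks = MAX_PROC;

          /* must happen before bpf_object__load() */
          return bpf_map__set_max_entries(map, ntasks);
  }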

@@ -12,6 +12,9 @@
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
/* create a new thread */
#define CLONE_THREAD 0x10000
#define MAX_STACKS 32
#define MAX_ENTRIES 102400

@@ -220,6 +223,33 @@ static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
	return 0;
}

SEC("tp_btf/task_newtask")
int on_newtask(u64 *ctx)
{
	struct task_struct *task;
	u64 clone_flags;
	u32 pid;
	u8 val = 1;

	if (!uses_tgid)
		return 0;

	task = (struct task_struct *)bpf_get_current_task();

	pid = BPF_CORE_READ(task, tgid);
	if (!bpf_map_lookup_elem(&task_filter, &pid))
		return 0;

	task = (struct task_struct *)ctx[0];
	clone_flags = ctx[1];

	pid = task->tgid;
	if (!(clone_flags & CLONE_THREAD))
		bpf_map_update_elem(&task_filter, &pid, &val, BPF_NOEXIST);

	return 0;
}

SEC("tp_btf/sched_switch")
int on_switch(u64 *ctx)
{
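
In the raw tp_btf calling convention used by on_newtask above, ctx[0] is
the newly created task and ctx[1] is clone_flags, matching the
task_newtask tracepoint prototype.  A standalone sketch of the same hook
written with the BPF_PROG() helper macro instead (the program name and
printout are illustrative only, not part of this patch):

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  #define CLONE_THREAD  0x10000

  char LICENSE[] SEC("license") = "Dual BSD/GPL";

  /* sketch: BPF_PROG() unpacks ctx[0]/ctx[1] into named arguments */
  SEC("tp_btf/task_newtask")
  int BPF_PROG(show_newtask, struct task_struct *task, u64 clone_flags)
  {
          bpf_printk("newtask tgid=%d new_process=%d",
                     task->tgid, !(clone_flags & CLONE_THREAD));
          return 0;
  }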