Commit 8c2accc8 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Allow BPF scriptlets to specify arguments to be fetched using
    DWARF info, via a prologue generated at compile/build time (He Kuang, Wang Nan)

  - Allow attaching BPF scriptlets to module symbols (Wang Nan)

  - Allow attaching BPF scriptlets to userspace code using uprobe (Wang Nan)

  - BPF programs can now specify 'perf probe' tunables via their section
    names, separating key=val values using semicolons (Wang Nan), as
    sketched below
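
  For example (an illustrative sketch, not taken from this series' test cases;
  the exact keys accepted in the section name are whatever 'perf probe'
  understands, so treat 'module' and 'exec' below as assumptions to be checked
  against the documentation):

    #define SEC(NAME) __attribute__((section(NAME), used))

    /* kprobe on a module symbol, selected via key=val in the section name */
    SEC("module=i915;func=i915_gem_do_execbuffer")
    int on_execbuffer(void *ctx)
    {
            return 1;       /* let every hit through */
    }

    /* uprobe on userspace code */
    SEC("exec=/lib64/libc.so.6;func=malloc")
    int on_malloc(void *ctx)
    {
            return 1;
    }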

Testing some of these new BPF features:

  Use case: get callchains when receiving SSL packets, filtering them in
            the kernel, at an arbitrary place.

  # cat ssl.bpf.c
  #define SEC(NAME) __attribute__((section(NAME), used))

  struct pt_regs;

  SEC("func=__inet_lookup_established hnum")
  int func(struct pt_regs *ctx, int err, unsigned short port)
  {
          return err == 0 && port == 443;
  }

  char _license[] SEC("license") = "GPL";
  int  _version   SEC("version") = LINUX_VERSION_CODE;
  #
  # perf record -a -g -e ssl.bpf.c
  ^C[ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 0.787 MB perf.data (3 samples) ]
  # perf script | head -30
  swapper     0 [000] 58783.268118: perf_bpf_probe:func: (ffffffff816a0f60) hnum=0x1bb
	 8a0f61 __inet_lookup_established (/lib/modules/4.3.0+/build/vmlinux)
	 896def ip_rcv_finish (/lib/modules/4.3.0+/build/vmlinux)
	 8976c2 ip_rcv (/lib/modules/4.3.0+/build/vmlinux)
	 855eba __netif_receive_skb_core (/lib/modules/4.3.0+/build/vmlinux)
	 8565d8 __netif_receive_skb (/lib/modules/4.3.0+/build/vmlinux)
	 8572a8 process_backlog (/lib/modules/4.3.0+/build/vmlinux)
	 856b11 net_rx_action (/lib/modules/4.3.0+/build/vmlinux)
	 2a284b __do_softirq (/lib/modules/4.3.0+/build/vmlinux)
	 2a2ba3 irq_exit (/lib/modules/4.3.0+/build/vmlinux)
	 96b7a4 do_IRQ (/lib/modules/4.3.0+/build/vmlinux)
	 969807 ret_from_intr (/lib/modules/4.3.0+/build/vmlinux)
	 2dede5 cpu_startup_entry (/lib/modules/4.3.0+/build/vmlinux)
	 95d5bc rest_init (/lib/modules/4.3.0+/build/vmlinux)
	1163ffa start_kernel ([kernel.vmlinux].init.text)
	11634d7 x86_64_start_reservations ([kernel.vmlinux].init.text)
	1163623 x86_64_start_kernel ([kernel.vmlinux].init.text)

  qemu-system-x86  9178 [003] 58785.792417: perf_bpf_probe:func: (ffffffff816a0f60) hnum=0x1bb
	 8a0f61 __inet_lookup_established (/lib/modules/4.3.0+/build/vmlinux)
	 896def ip_rcv_finish (/lib/modules/4.3.0+/build/vmlinux)
	 8976c2 ip_rcv (/lib/modules/4.3.0+/build/vmlinux)
	 855eba __netif_receive_skb_core (/lib/modules/4.3.0+/build/vmlinux)
	 8565d8 __netif_receive_skb (/lib/modules/4.3.0+/build/vmlinux)
	 856660 netif_receive_skb_internal (/lib/modules/4.3.0+/build/vmlinux)
	 8566ec netif_receive_skb_sk (/lib/modules/4.3.0+/build/vmlinux)
	   430a br_handle_frame_finish ([bridge])
	   48bc br_handle_frame ([bridge])
	 855f44 __netif_receive_skb_core (/lib/modules/4.3.0+/build/vmlinux)
	 8565d8 __netif_receive_skb (/lib/modules/4.3.0+/build/vmlinux)
  #

    Use the various 'perf probe' options to list functions and to see which
    variables can be collected at any given point, experiment first collecting
    without a filter, then filter, and use it together with 'perf trace' and
    'perf top', with or without callchains. If it explodes, please tell us!

  - Introduce a new callchain mode: "folded", which lists one-line
    representations of all callchains for a given histogram entry, facilitating
    'perf report' output processing by other tools, such as Brendan Gregg's
    flamegraph tools (Namhyung Kim)

  E.g.:

 # perf report | grep -v ^# | head
    18.37%     0.00%  swapper  [kernel.kallsyms]   [k] cpu_startup_entry
                    |
                    ---cpu_startup_entry
                       |
                       |--12.07%--start_secondary
                       |
                        --6.30%--rest_init
                                  start_kernel
                                  x86_64_start_reservations
                                  x86_64_start_kernel
  #

 Becomes, in "folded" mode:

 # perf report -g folded | grep -v ^# | head -5
     18.37%     0.00%  swapper [kernel.kallsyms]   [k] cpu_startup_entry
   12.07% cpu_startup_entry;start_secondary
    6.30% cpu_startup_entry;rest_init;start_kernel;x86_64_start_reservations;x86_64_start_kernel
     16.90%     0.00%  swapper [kernel.kallsyms]   [k] call_cpuidle
   11.23% call_cpuidle;cpu_startup_entry;start_secondary
    5.67% call_cpuidle;cpu_startup_entry;rest_init;start_kernel;x86_64_start_reservations;x86_64_start_kernel
     16.90%     0.00%  swapper [kernel.kallsyms]   [k] cpuidle_enter
   11.23% cpuidle_enter;call_cpuidle;cpu_startup_entry;start_secondary
    5.67% cpuidle_enter;call_cpuidle;cpu_startup_entry;rest_init;start_kernel;x86_64_start_reservations;x86_64_start_kernel
     15.12%     0.00%  swapper [kernel.kallsyms]   [k] cpuidle_enter_state
  #

   The user can also select one of "count", "period" or "percent" as the first column.
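
   For instance, to show absolute sample counts in the folded output
   (an illustrative command line, output omitted; '-g' takes the same
   comma-separated parameters as --call-graph, so 'folded,count' asks for
   folded call chains with the count value):

   # perf report -g folded,count | grep -v ^# | head -5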

Infrastructure changes:

  - Fix multiple leaks found with Valgrind and a refcount
    debugger (Masami Hiramatsu)

  - Add further 'perf test' entries for BPF and LLVM (Wang Nan)

  - Improve 'perf test' to support subtests, so that the series of tests
    performed in the LLVM and BPF main tests appear in the default 'perf test'
    output (Wang Nan)

  - Move memdup() from tools/perf to tools/lib/string.c (Arnaldo Carvalho de Melo)

  - Adopt strtobool() from the kernel into tools/lib/ (Wang Nan)
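
    A minimal usage sketch for the strtobool() helper just mentioned
    (hypothetical caller; the prototype is the one added to
    tools/include/linux/string.h by this series):

      bool enable;

      if (strtobool(value, &enable) < 0)
              return -EINVAL;    /* not one of 'Yy1Nn0' */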

  - Fix selftests_install tools/ Makefile rule (Kevin Hilman)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 90eec103 2c6caff2
@@ -96,7 +96,7 @@ cgroup_install firewire_install hv_install lguest_install perf_install usb_insta
        $(call descend,$(@:_install=),install)

selftests_install:
-       $(call descend,testing/$(@:_clean=),install)
+       $(call descend,testing/$(@:_install=),install)

turbostat_install x86_energy_perf_policy_install:
        $(call descend,power/x86/$(@:_install=),install)
......
#ifndef _TOOLS_LINUX_STRING_H_
#define _TOOLS_LINUX_STRING_H_
#include <linux/types.h> /* for size_t */
void *memdup(const void *src, size_t len);
int strtobool(const char *s, bool *res);
#endif /* _LINUX_STRING_H_ */
@@ -152,7 +152,11 @@ struct bpf_program {
        } *reloc_desc;
        int nr_reloc;

-       int fd;
+       struct {
+               int nr;
+               int *fds;
+       } instances;
+       bpf_program_prep_t preprocessor;

        struct bpf_object *obj;
        void *priv;

@@ -206,10 +210,25 @@ struct bpf_object {
static void bpf_program__unload(struct bpf_program *prog)
{
+       int i;
+
        if (!prog)
                return;

-       zclose(prog->fd);
+       /*
+        * If the object is opened but the program was never loaded,
+        * it is possible that prog->instances.nr == -1.
+        */
+       if (prog->instances.nr > 0) {
+               for (i = 0; i < prog->instances.nr; i++)
+                       zclose(prog->instances.fds[i]);
+       } else if (prog->instances.nr != -1) {
+               pr_warning("Internal error: instances.nr is %d\n",
+                          prog->instances.nr);
+       }
+
+       prog->instances.nr = -1;
+       zfree(&prog->instances.fds);
}

static void bpf_program__exit(struct bpf_program *prog)

@@ -260,7 +279,8 @@ bpf_program__init(void *data, size_t size, char *name, int idx,
        memcpy(prog->insns, data,
               prog->insns_cnt * sizeof(struct bpf_insn));
        prog->idx = idx;
-       prog->fd = -1;
+       prog->instances.fds = NULL;
+       prog->instances.nr = -1;

        return 0;
errout:

@@ -860,13 +880,73 @@ static int
bpf_program__load(struct bpf_program *prog,
                  char *license, u32 kern_version)
{
-       int err, fd;
+       int err = 0, fd, i;
+
+       if (prog->instances.nr < 0 || !prog->instances.fds) {
+               if (prog->preprocessor) {
+                       pr_warning("Internal error: can't load program '%s'\n",
+                                  prog->section_name);
+                       return -LIBBPF_ERRNO__INTERNAL;
+               }
+
+               prog->instances.fds = malloc(sizeof(int));
+               if (!prog->instances.fds) {
+                       pr_warning("Not enough memory for BPF fds\n");
+                       return -ENOMEM;
+               }
+               prog->instances.nr = 1;
+               prog->instances.fds[0] = -1;
+       }
+
+       if (!prog->preprocessor) {
+               if (prog->instances.nr != 1) {
+                       pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
+                                  prog->section_name, prog->instances.nr);
+               }
                err = load_program(prog->insns, prog->insns_cnt,
                                   license, kern_version, &fd);
                if (!err)
-                       prog->fd = fd;
+                       prog->instances.fds[0] = fd;
+               goto out;
+       }
+
+       for (i = 0; i < prog->instances.nr; i++) {
+               struct bpf_prog_prep_result result;
+               bpf_program_prep_t preprocessor = prog->preprocessor;
+
+               bzero(&result, sizeof(result));
+               err = preprocessor(prog, i, prog->insns,
+                                  prog->insns_cnt, &result);
+               if (err) {
+                       pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
+                                  i, prog->section_name);
+                       goto out;
+               }
+
+               if (!result.new_insn_ptr || !result.new_insn_cnt) {
+                       pr_debug("Skip loading the %dth instance of program '%s'\n",
+                                i, prog->section_name);
+                       prog->instances.fds[i] = -1;
+                       if (result.pfd)
+                               *result.pfd = -1;
+                       continue;
+               }
+
+               err = load_program(result.new_insn_ptr,
+                                  result.new_insn_cnt,
+                                  license, kern_version, &fd);
+               if (err) {
+                       pr_warning("Loading the %dth instance of program '%s' failed\n",
+                                  i, prog->section_name);
+                       goto out;
+               }
+
+               if (result.pfd)
+                       *result.pfd = fd;
+               prog->instances.fds[i] = fd;
+       }
+out:
        if (err)
                pr_warning("failed to load program '%s'\n",
                           prog->section_name);

@@ -1121,5 +1201,53 @@ const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
int bpf_program__fd(struct bpf_program *prog)
{
-       return prog->fd;
+       return bpf_program__nth_fd(prog, 0);
+}
+
+int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
+                         bpf_program_prep_t prep)
+{
+       int *instances_fds;
+
+       if (nr_instances <= 0 || !prep)
+               return -EINVAL;
+
+       if (prog->instances.nr > 0 || prog->instances.fds) {
+               pr_warning("Can't set pre-processor after loading\n");
+               return -EINVAL;
+       }
+
+       instances_fds = malloc(sizeof(int) * nr_instances);
+       if (!instances_fds) {
+               pr_warning("alloc memory failed for fds\n");
+               return -ENOMEM;
+       }
+
+       /* fill all fd with -1 */
+       memset(instances_fds, -1, sizeof(int) * nr_instances);
+
+       prog->instances.nr = nr_instances;
+       prog->instances.fds = instances_fds;
+       prog->preprocessor = prep;
+       return 0;
+}
+
+int bpf_program__nth_fd(struct bpf_program *prog, int n)
+{
+       int fd;
+
+       if (n >= prog->instances.nr || n < 0) {
+               pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
+                          n, prog->section_name, prog->instances.nr);
+               return -EINVAL;
+       }
+
+       fd = prog->instances.fds[n];
+       if (fd < 0) {
+               pr_warning("%dth instance of program '%s' is invalid\n",
+                          n, prog->section_name);
+               return -ENOENT;
+       }
+
+       return fd;
}
@@ -88,6 +88,70 @@ const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
int bpf_program__fd(struct bpf_program *prog);
struct bpf_insn;
/*
* Libbpf allows callers to adjust BPF programs before being loaded
* into kernel. One program in an object file can be transform into
* multiple variants to be attached to different code.
*
* bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
* are APIs for this propose.
*
* - bpf_program_prep_t:
* It defines 'preprocessor', which is a caller defined function
* passed to libbpf through bpf_program__set_prep(), and will be
* called before program is loaded. The processor should adjust
* the program one time for each instances according to the number
* passed to it.
*
* - bpf_program__set_prep:
* Attachs a preprocessor to a BPF program. The number of instances
* whould be created is also passed through this function.
*
* - bpf_program__nth_fd:
* After the program is loaded, get resuling fds from bpf program for
* each instances.
*
* If bpf_program__set_prep() is not used, the program whould be loaded
* without adjustment during bpf_object__load(). The program has only
* one instance. In this case bpf_program__fd(prog) is equal to
* bpf_program__nth_fd(prog, 0).
*/
struct bpf_prog_prep_result {
/*
* If not NULL, load new instruction array.
* If set to NULL, don't load this instance.
*/
struct bpf_insn *new_insn_ptr;
int new_insn_cnt;
/* If not NULL, result fd is set to it */
int *pfd;
};
/*
* Parameters of bpf_program_prep_t:
* - prog: The bpf_program being loaded.
* - n: Index of instance being generated.
* - insns: BPF instructions array.
* - insns_cnt:Number of instructions in insns.
* - res: Output parameter, result of transformation.
*
* Return value:
* - Zero: pre-processing success.
* - Non-zero: pre-processing, stop loading.
*/
typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
struct bpf_insn *insns, int insns_cnt,
struct bpf_prog_prep_result *res);
int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
bpf_program_prep_t prep);
int bpf_program__nth_fd(struct bpf_program *prog, int n);
/*
 * We don't need __attribute__((packed)) now since it is
 * unnecessary for 'bpf_map_def' because they are all aligned.
......
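
  The comment block added to libbpf.h above documents the instance API; the
  following is an illustrative sketch (not part of the patch) of how a caller
  is expected to drive it. Error handling is trimmed, and the instruction
  rewriting a real preprocessor would perform is only hinted at in a comment:

    static int prep(struct bpf_program *prog, int n,
                    struct bpf_insn *insns, int insns_cnt,
                    struct bpf_prog_prep_result *res)
    {
            if (n != 0) {
                    /* Tell libbpf to skip loading this instance */
                    res->new_insn_ptr = NULL;
                    res->new_insn_cnt = 0;
                    res->pfd = NULL;
                    return 0;
            }

            /*
             * A real preprocessor would rewrite 'insns' here, e.g. to
             * inject a prologue fetching arguments for instance 'n'.
             */
            res->new_insn_ptr = insns;
            res->new_insn_cnt = insns_cnt;
            res->pfd = NULL;
            return 0;
    }

        /* before bpf_object__load(obj): */
        bpf_program__set_prep(prog, 2, prep);

        /* after loading, fetch the fd of instance 0: */
        fd = bpf_program__nth_fd(prog, 0);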
/*
* linux/tools/lib/string.c
*
* Copied from linux/lib/string.c, where it is:
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* More specifically, the first copied function was strtobool, which
* was introduced by:
*
* d0f1fed29e6e ("Add a strtobool function matching semantics of existing in kernel equivalents")
* Author: Jonathan Cameron <jic23@cam.ac.uk>
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/string.h>
/**
* memdup - duplicate region of memory
*
* @src: memory region to duplicate
* @len: memory region length
*/
void *memdup(const void *src, size_t len)
{
void *p = malloc(len);
if (p)
memcpy(p, src, len);
return p;
}
/**
* strtobool - convert common user inputs into boolean values
* @s: input string
* @res: result
*
* This routine returns 0 iff the first character is one of 'Yy1Nn0'.
* Otherwise it will return -EINVAL. Value pointed to by res is
* updated upon finding a match.
*/
int strtobool(const char *s, bool *res)
{
switch (s[0]) {
case 'y':
case 'Y':
case '1':
*res = true;
break;
case 'n':
case 'N':
case '0':
*res = false;
break;
default:
return -EINVAL;
}
return 0;
}
@@ -170,17 +170,18 @@ OPTIONS
        Dump raw trace in ASCII.

-g::
---call-graph=<print_type,threshold[,print_limit],order,sort_key,branch>::
+--call-graph=<print_type,threshold[,print_limit],order,sort_key[,branch],value>::
        Display call chains using type, min percent threshold, print limit,
-       call order, sort key and branch. Note that ordering of parameters is not
-       fixed so any parement can be given in an arbitraty order. One exception
-       is the print_limit which should be preceded by threshold.
+       call order, sort key, optional branch and value. Note that ordering of
+       parameters is not fixed so any parement can be given in an arbitraty order.
+       One exception is the print_limit which should be preceded by threshold.

        print_type can be either:
        - flat: single column, linear exposure of call chains.
        - graph: use a graph tree, displaying absolute overhead rates. (default)
        - fractal: like graph, but displays relative rates. Each branch of
                   the tree is considered as a new profiled object.
+       - folded: call chains are displayed in a line, separated by semicolons
        - none: disable call chain display.

        threshold is a percentage value which specifies a minimum percent to be

@@ -204,6 +205,11 @@ OPTIONS
        - branch: include last branch information in callgraph when available.
                  Usually more convenient to use --branch-history for this.

+       value can be:
+       - percent: diplay overhead percent (default)
+       - period: display event period
+       - count: display event count
+
--children::
        Accumulate callchain of children to parent entry so that then can
        show up in the output. The output will have a new "Children" column
......
@@ -22,6 +22,7 @@ tools/lib/api
tools/lib/bpf
tools/lib/hweight.c
tools/lib/rbtree.c
+tools/lib/string.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c

@@ -50,6 +51,7 @@ tools/include/linux/log2.h
tools/include/linux/poison.h
tools/include/linux/rbtree.h
tools/include/linux/rbtree_augmented.h
+tools/include/linux/string.h
tools/include/linux/types.h
tools/include/linux/err.h
include/asm-generic/bitops/arch_hweight.h
......
@@ -2,10 +2,10 @@
#define ARCH_TESTS_H

/* Tests */
-int test__rdpmc(void);
-int test__perf_time_to_tsc(void);
-int test__insn_x86(void);
-int test__intel_cqm_count_nmi_context(void);
+int test__rdpmc(int subtest);
+int test__perf_time_to_tsc(int subtest);
+int test__insn_x86(int subtest);
+int test__intel_cqm_count_nmi_context(int subtest);

#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
......
...@@ -171,7 +171,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64) ...@@ -171,7 +171,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64)
* verbose (-v) option to see all the instructions and whether or not they * verbose (-v) option to see all the instructions and whether or not they
* decoded successfuly. * decoded successfuly.
*/ */
int test__insn_x86(void) int test__insn_x86(int subtest __maybe_unused)
{ {
int ret = 0; int ret = 0;
......
...@@ -33,7 +33,7 @@ static pid_t spawn(void) ...@@ -33,7 +33,7 @@ static pid_t spawn(void)
* the last read counter value to avoid triggering a WARN_ON_ONCE() in * the last read counter value to avoid triggering a WARN_ON_ONCE() in
* smp_call_function_many() caused by sending IPIs from NMI context. * smp_call_function_many() caused by sending IPIs from NMI context.
*/ */
int test__intel_cqm_count_nmi_context(void) int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
{ {
struct perf_evlist *evlist = NULL; struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel = NULL; struct perf_evsel *evsel = NULL;
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
* %0 is returned, otherwise %-1 is returned. If TSC conversion is not * %0 is returned, otherwise %-1 is returned. If TSC conversion is not
* supported then then the test passes but " (not supported)" is printed. * supported then then the test passes but " (not supported)" is printed.
*/ */
int test__perf_time_to_tsc(void) int test__perf_time_to_tsc(int subtest __maybe_unused)
{ {
struct record_opts opts = { struct record_opts opts = {
.mmap_pages = UINT_MAX, .mmap_pages = UINT_MAX,
......
...@@ -149,7 +149,7 @@ static int __test__rdpmc(void) ...@@ -149,7 +149,7 @@ static int __test__rdpmc(void)
return 0; return 0;
} }
int test__rdpmc(void) int test__rdpmc(int subtest __maybe_unused)
{ {
int status = 0; int status = 0;
int wret = 0; int wret = 0;
......
...@@ -5,6 +5,7 @@ libperf-y += kvm-stat.o ...@@ -5,6 +5,7 @@ libperf-y += kvm-stat.o
libperf-y += perf_regs.o libperf-y += perf_regs.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
......
@@ -625,7 +625,7 @@ parse_percent_limit(const struct option *opt, const char *str,
        return 0;
}

-#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function"
+#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"

const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
        CALLCHAIN_REPORT_HELP

@@ -708,7 +708,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
                    "Only display entries with parent-match"),
        OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
-                            "print_type,threshold[,print_limit],order,sort_key[,branch]",
+                            "print_type,threshold[,print_limit],order,sort_key[,branch],value",
                             report_callchain_help, &report_parse_callchain_opt,
                             callchain_default_opt),
        OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
......
...@@ -318,6 +318,18 @@ ifndef NO_LIBELF ...@@ -318,6 +318,18 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBBPF_SUPPORT CFLAGS += -DHAVE_LIBBPF_SUPPORT
$(call detected,CONFIG_LIBBPF) $(call detected,CONFIG_LIBBPF)
endif endif
ifndef NO_DWARF
ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
CFLAGS += -DHAVE_BPF_PROLOGUE
$(call detected,CONFIG_BPF_PROLOGUE)
else
msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
endif
else
msg := $(warning DWARF support is off, BPF prologue is disabled);
endif
endif # NO_LIBBPF endif # NO_LIBBPF
endif # NO_LIBELF endif # NO_LIBELF
......
llvm-src-base.c llvm-src-base.c
llvm-src-kbuild.c llvm-src-kbuild.c
llvm-src-prologue.c
...@@ -31,7 +31,7 @@ perf-y += sample-parsing.o ...@@ -31,7 +31,7 @@ perf-y += sample-parsing.o
perf-y += parse-no-sample-id-all.o perf-y += parse-no-sample-id-all.o
perf-y += kmod-path.o perf-y += kmod-path.o
perf-y += thread-map.o perf-y += thread-map.o
perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
perf-y += bpf.o perf-y += bpf.o
perf-y += topology.o perf-y += topology.o
...@@ -49,6 +49,13 @@ $(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c ...@@ -49,6 +49,13 @@ $(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@ $(Q)echo ';' >> $@
$(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c
$(call rule_mkdir)
$(Q)echo '#include <tests/llvm.h>' > $@
$(Q)echo 'const char test_llvm__bpf_test_prologue_prog[] =' >> $@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64)) ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif endif
......
...@@ -153,7 +153,7 @@ static int run_dir(const char *d, const char *perf) ...@@ -153,7 +153,7 @@ static int run_dir(const char *d, const char *perf)
return system(cmd); return system(cmd);
} }
int test__attr(void) int test__attr(int subtest __maybe_unused)
{ {
struct stat st; struct stat st;
char path_perf[PATH_MAX]; char path_perf[PATH_MAX];
......
...@@ -111,7 +111,7 @@ static long long bp_count(int fd) ...@@ -111,7 +111,7 @@ static long long bp_count(int fd)
return count; return count;
} }
int test__bp_signal(void) int test__bp_signal(int subtest __maybe_unused)
{ {
struct sigaction sa; struct sigaction sa;
long long count1, count2; long long count1, count2;
......
...@@ -58,7 +58,7 @@ static long long bp_count(int fd) ...@@ -58,7 +58,7 @@ static long long bp_count(int fd)
#define EXECUTIONS 10000 #define EXECUTIONS 10000
#define THRESHOLD 100 #define THRESHOLD 100
int test__bp_signal_overflow(void) int test__bp_signal_overflow(int subtest __maybe_unused)
{ {
struct perf_event_attr pe; struct perf_event_attr pe;
struct sigaction sa; struct sigaction sa;
......
/*
* bpf-script-test-prologue.c
* Test BPF prologue
*/
#ifndef LINUX_VERSION_CODE
# error Need LINUX_VERSION_CODE
# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
#endif
#define SEC(NAME) __attribute__((section(NAME), used))
#include <uapi/linux/fs.h>
#define FMODE_READ 0x1
#define FMODE_WRITE 0x2
static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
(void *) 6;
SEC("func=null_lseek file->f_mode offset orig")
int bpf_func__null_lseek(void *ctx, int err, unsigned long f_mode,
unsigned long offset, unsigned long orig)
{
if (err)
return 0;
if (f_mode & FMODE_WRITE)
return 0;
if (offset & 1)
return 0;
if (orig == SEEK_CUR)
return 0;
return 1;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
...@@ -19,6 +19,29 @@ static int epoll_pwait_loop(void) ...@@ -19,6 +19,29 @@ static int epoll_pwait_loop(void)
return 0; return 0;
} }
#ifdef HAVE_BPF_PROLOGUE
static int llseek_loop(void)
{
int fds[2], i;
fds[0] = open("/dev/null", O_RDONLY);
fds[1] = open("/dev/null", O_RDWR);
if (fds[0] < 0 || fds[1] < 0)
return -1;
for (i = 0; i < NR_ITERS; i++) {
lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
}
close(fds[0]);
close(fds[1]);
return 0;
}
#endif
static struct { static struct {
enum test_llvm__testcase prog_id; enum test_llvm__testcase prog_id;
const char *desc; const char *desc;
...@@ -37,6 +60,17 @@ static struct { ...@@ -37,6 +60,17 @@ static struct {
&epoll_pwait_loop, &epoll_pwait_loop,
(NR_ITERS + 1) / 2, (NR_ITERS + 1) / 2,
}, },
#ifdef HAVE_BPF_PROLOGUE
{
LLVM_TESTCASE_BPF_PROLOGUE,
"Test BPF prologue generation",
"[bpf_prologue_test]",
"fix kbuild first",
"check your vmlinux setting?",
&llseek_loop,
(NR_ITERS + 1) / 4,
},
#endif
}; };
static int do_test(struct bpf_object *obj, int (*func)(void), static int do_test(struct bpf_object *obj, int (*func)(void),
...@@ -68,7 +102,6 @@ static int do_test(struct bpf_object *obj, int (*func)(void), ...@@ -68,7 +102,6 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj); err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
if (err || list_empty(&parse_evlist.list)) { if (err || list_empty(&parse_evlist.list)) {
pr_debug("Failed to add events selected by BPF\n"); pr_debug("Failed to add events selected by BPF\n");
if (!err)
return TEST_FAIL; return TEST_FAIL;
} }
...@@ -123,8 +156,10 @@ static int do_test(struct bpf_object *obj, int (*func)(void), ...@@ -123,8 +156,10 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
} }
} }
if (count != expect) if (count != expect) {
pr_debug("BPF filter result incorrect\n"); pr_debug("BPF filter result incorrect\n");
goto out_delete_evlist;
}
ret = TEST_OK; ret = TEST_OK;
...@@ -146,7 +181,7 @@ prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name) ...@@ -146,7 +181,7 @@ prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
return obj; return obj;
} }
static int __test__bpf(int index) static int __test__bpf(int idx)
{ {
int ret; int ret;
void *obj_buf; void *obj_buf;
...@@ -154,54 +189,72 @@ static int __test__bpf(int index) ...@@ -154,54 +189,72 @@ static int __test__bpf(int index)
struct bpf_object *obj; struct bpf_object *obj;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz, ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
bpf_testcase_table[index].prog_id, bpf_testcase_table[idx].prog_id,
true); true);
if (ret != TEST_OK || !obj_buf || !obj_buf_sz) { if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
pr_debug("Unable to get BPF object, %s\n", pr_debug("Unable to get BPF object, %s\n",
bpf_testcase_table[index].msg_compile_fail); bpf_testcase_table[idx].msg_compile_fail);
if (index == 0) if (idx == 0)
return TEST_SKIP; return TEST_SKIP;
else else
return TEST_FAIL; return TEST_FAIL;
} }
obj = prepare_bpf(obj_buf, obj_buf_sz, obj = prepare_bpf(obj_buf, obj_buf_sz,
bpf_testcase_table[index].name); bpf_testcase_table[idx].name);
if (!obj) { if (!obj) {
ret = TEST_FAIL; ret = TEST_FAIL;
goto out; goto out;
} }
ret = do_test(obj, ret = do_test(obj,
bpf_testcase_table[index].target_func, bpf_testcase_table[idx].target_func,
bpf_testcase_table[index].expect_result); bpf_testcase_table[idx].expect_result);
out: out:
bpf__clear(); bpf__clear();
return ret; return ret;
} }
int test__bpf(void) int test__bpf_subtest_get_nr(void)
{
return (int)ARRAY_SIZE(bpf_testcase_table);
}
const char *test__bpf_subtest_get_desc(int i)
{
if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
return NULL;
return bpf_testcase_table[i].desc;
}
int test__bpf(int i)
{ {
unsigned int i;
int err; int err;
if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
return TEST_FAIL;
if (geteuid() != 0) { if (geteuid() != 0) {
pr_debug("Only root can run BPF test\n"); pr_debug("Only root can run BPF test\n");
return TEST_SKIP; return TEST_SKIP;
} }
for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) {
err = __test__bpf(i); err = __test__bpf(i);
if (err != TEST_OK)
return err; return err;
}
return TEST_OK;
} }
#else #else
int test__bpf(void) int test__bpf_subtest_get_nr(void)
{
return 0;
}
const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
return NULL;
}
int test__bpf(int i __maybe_unused)
{ {
pr_debug("Skip BPF test because BPF support is not compiled\n"); pr_debug("Skip BPF test because BPF support is not compiled\n");
return TEST_SKIP; return TEST_SKIP;
......
...@@ -160,6 +160,11 @@ static struct test generic_tests[] = { ...@@ -160,6 +160,11 @@ static struct test generic_tests[] = {
{ {
.desc = "Test LLVM searching and compiling", .desc = "Test LLVM searching and compiling",
.func = test__llvm, .func = test__llvm,
.subtest = {
.skip_if_fail = true,
.get_nr = test__llvm_subtest_get_nr,
.get_desc = test__llvm_subtest_get_desc,
},
}, },
{ {
.desc = "Test topology in session", .desc = "Test topology in session",
...@@ -168,6 +173,11 @@ static struct test generic_tests[] = { ...@@ -168,6 +173,11 @@ static struct test generic_tests[] = {
{ {
.desc = "Test BPF filter", .desc = "Test BPF filter",
.func = test__bpf, .func = test__bpf,
.subtest = {
.skip_if_fail = true,
.get_nr = test__bpf_subtest_get_nr,
.get_desc = test__bpf_subtest_get_desc,
},
}, },
{ {
.func = NULL, .func = NULL,
...@@ -203,7 +213,7 @@ static bool perf_test__matches(struct test *test, int curr, int argc, const char ...@@ -203,7 +213,7 @@ static bool perf_test__matches(struct test *test, int curr, int argc, const char
return false; return false;
} }
static int run_test(struct test *test) static int run_test(struct test *test, int subtest)
{ {
int status, err = -1, child = fork(); int status, err = -1, child = fork();
char sbuf[STRERR_BUFSIZE]; char sbuf[STRERR_BUFSIZE];
...@@ -216,7 +226,19 @@ static int run_test(struct test *test) ...@@ -216,7 +226,19 @@ static int run_test(struct test *test)
if (!child) { if (!child) {
pr_debug("test child forked, pid %d\n", getpid()); pr_debug("test child forked, pid %d\n", getpid());
err = test->func(); if (!verbose) {
int nullfd = open("/dev/null", O_WRONLY);
if (nullfd >= 0) {
close(STDERR_FILENO);
close(STDOUT_FILENO);
dup2(nullfd, STDOUT_FILENO);
dup2(STDOUT_FILENO, STDERR_FILENO);
close(nullfd);
}
}
err = test->func(subtest);
exit(err); exit(err);
} }
...@@ -237,6 +259,40 @@ static int run_test(struct test *test) ...@@ -237,6 +259,40 @@ static int run_test(struct test *test)
for (j = 0; j < ARRAY_SIZE(tests); j++) \ for (j = 0; j < ARRAY_SIZE(tests); j++) \
for (t = &tests[j][0]; t->func; t++) for (t = &tests[j][0]; t->func; t++)
static int test_and_print(struct test *t, bool force_skip, int subtest)
{
int err;
if (!force_skip) {
pr_debug("\n--- start ---\n");
err = run_test(t, subtest);
pr_debug("---- end ----\n");
} else {
pr_debug("\n--- force skipped ---\n");
err = TEST_SKIP;
}
if (!t->subtest.get_nr)
pr_debug("%s:", t->desc);
else
pr_debug("%s subtest %d:", t->desc, subtest);
switch (err) {
case TEST_OK:
pr_info(" Ok\n");
break;
case TEST_SKIP:
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
break;
case TEST_FAIL:
default:
color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
break;
}
return err;
}
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{ {
struct test *t; struct test *t;
...@@ -264,21 +320,43 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) ...@@ -264,21 +320,43 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
continue; continue;
} }
pr_debug("\n--- start ---\n"); if (!t->subtest.get_nr) {
err = run_test(t); test_and_print(t, false, -1);
pr_debug("---- end ----\n%s:", t->desc); } else {
int subn = t->subtest.get_nr();
/*
* minus 2 to align with normal testcases.
* For subtest we print additional '.x' in number.
* for example:
*
* 35: Test LLVM searching and compiling :
* 35.1: Basic BPF llvm compiling test : Ok
*/
int subw = width > 2 ? width - 2 : width;
bool skip = false;
int subi;
switch (err) { if (subn <= 0) {
case TEST_OK: color_fprintf(stderr, PERF_COLOR_YELLOW,
pr_info(" Ok\n"); " Skip (not compiled in)\n");
break; continue;
case TEST_SKIP: }
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n"); pr_info("\n");
break;
case TEST_FAIL: for (subi = 0; subi < subn; subi++) {
default: int len = strlen(t->subtest.get_desc(subi));
color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
break; if (subw < len)
subw = len;
}
for (subi = 0; subi < subn; subi++) {
pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
t->subtest.get_desc(subi));
err = test_and_print(t, skip, subi);
if (err != TEST_OK && t->subtest.skip_if_fail)
skip = true;
}
} }
} }
......
...@@ -601,7 +601,7 @@ static int do_test_code_reading(bool try_kcore) ...@@ -601,7 +601,7 @@ static int do_test_code_reading(bool try_kcore)
return err; return err;
} }
int test__code_reading(void) int test__code_reading(int subtest __maybe_unused)
{ {
int ret; int ret;
......
...@@ -110,7 +110,7 @@ static int dso__data_fd(struct dso *dso, struct machine *machine) ...@@ -110,7 +110,7 @@ static int dso__data_fd(struct dso *dso, struct machine *machine)
return fd; return fd;
} }
int test__dso_data(void) int test__dso_data(int subtest __maybe_unused)
{ {
struct machine machine; struct machine machine;
struct dso *dso; struct dso *dso;
...@@ -245,7 +245,7 @@ static int set_fd_limit(int n) ...@@ -245,7 +245,7 @@ static int set_fd_limit(int n)
return setrlimit(RLIMIT_NOFILE, &rlim); return setrlimit(RLIMIT_NOFILE, &rlim);
} }
int test__dso_data_cache(void) int test__dso_data_cache(int subtest __maybe_unused)
{ {
struct machine machine; struct machine machine;
long nr_end, nr = open_files_cnt(); long nr_end, nr = open_files_cnt();
...@@ -302,7 +302,7 @@ int test__dso_data_cache(void) ...@@ -302,7 +302,7 @@ int test__dso_data_cache(void)
return 0; return 0;
} }
int test__dso_data_reopen(void) int test__dso_data_reopen(int subtest __maybe_unused)
{ {
struct machine machine; struct machine machine;
long nr_end, nr = open_files_cnt(); long nr_end, nr = open_files_cnt();
......
...@@ -142,7 +142,7 @@ static int krava_1(struct thread *thread) ...@@ -142,7 +142,7 @@ static int krava_1(struct thread *thread)
return krava_2(thread); return krava_2(thread);
} }
int test__dwarf_unwind(void) int test__dwarf_unwind(int subtest __maybe_unused)
{ {
struct machines machines; struct machines machines;
struct machine *machine; struct machine *machine;
......
...@@ -95,7 +95,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names) ...@@ -95,7 +95,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
#define perf_evsel__name_array_test(names) \ #define perf_evsel__name_array_test(names) \
__perf_evsel__name_array_test(names, ARRAY_SIZE(names)) __perf_evsel__name_array_test(names, ARRAY_SIZE(names))
int test__perf_evsel__roundtrip_name_test(void) int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused)
{ {
int err = 0, ret = 0; int err = 0, ret = 0;
......
...@@ -32,7 +32,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, ...@@ -32,7 +32,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return ret; return ret;
} }
int test__perf_evsel__tp_sched_test(void) int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
{ {
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch"); struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
int ret = 0; int ret = 0;
......
...@@ -25,7 +25,7 @@ static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE ...@@ -25,7 +25,7 @@ static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE
return printed + fdarray__fprintf(fda, fp); return printed + fdarray__fprintf(fda, fp);
} }
int test__fdarray__filter(void) int test__fdarray__filter(int subtest __maybe_unused)
{ {
int nr_fds, expected_fd[2], fd, err = TEST_FAIL; int nr_fds, expected_fd[2], fd, err = TEST_FAIL;
struct fdarray *fda = fdarray__new(5, 5); struct fdarray *fda = fdarray__new(5, 5);
...@@ -103,7 +103,7 @@ int test__fdarray__filter(void) ...@@ -103,7 +103,7 @@ int test__fdarray__filter(void)
return err; return err;
} }
int test__fdarray__add(void) int test__fdarray__add(int subtest __maybe_unused)
{ {
int err = TEST_FAIL; int err = TEST_FAIL;
struct fdarray *fda = fdarray__new(2, 2); struct fdarray *fda = fdarray__new(2, 2);
......
...@@ -686,7 +686,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) ...@@ -686,7 +686,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
return err; return err;
} }
int test__hists_cumulate(void) int test__hists_cumulate(int subtest __maybe_unused)
{ {
int err = TEST_FAIL; int err = TEST_FAIL;
struct machines machines; struct machines machines;
......
...@@ -104,7 +104,7 @@ static int add_hist_entries(struct perf_evlist *evlist, ...@@ -104,7 +104,7 @@ static int add_hist_entries(struct perf_evlist *evlist,
return TEST_FAIL; return TEST_FAIL;
} }
int test__hists_filter(void) int test__hists_filter(int subtest __maybe_unused)
{ {
int err = TEST_FAIL; int err = TEST_FAIL;
struct machines machines; struct machines machines;
......
...@@ -274,7 +274,7 @@ static int validate_link(struct hists *leader, struct hists *other) ...@@ -274,7 +274,7 @@ static int validate_link(struct hists *leader, struct hists *other)
return __validate_link(leader, 0) || __validate_link(other, 1); return __validate_link(leader, 0) || __validate_link(other, 1);
} }
int test__hists_link(void) int test__hists_link(int subtest __maybe_unused)
{ {
int err = -1; int err = -1;
struct hists *hists, *first_hists; struct hists *hists, *first_hists;
......
...@@ -576,7 +576,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine) ...@@ -576,7 +576,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
return err; return err;
} }
int test__hists_output(void) int test__hists_output(int subtest __maybe_unused)
{ {
int err = TEST_FAIL; int err = TEST_FAIL;
struct machines machines; struct machines machines;
......
...@@ -49,7 +49,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm) ...@@ -49,7 +49,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
* when an event is disabled but a dummy software event is not disabled. If the * when an event is disabled but a dummy software event is not disabled. If the
* test passes %0 is returned, otherwise %-1 is returned. * test passes %0 is returned, otherwise %-1 is returned.
*/ */
int test__keep_tracking(void) int test__keep_tracking(int subtest __maybe_unused)
{ {
struct record_opts opts = { struct record_opts opts = {
.mmap_pages = UINT_MAX, .mmap_pages = UINT_MAX,
......
...@@ -49,7 +49,7 @@ static int test_is_kernel_module(const char *path, int cpumode, bool expect) ...@@ -49,7 +49,7 @@ static int test_is_kernel_module(const char *path, int cpumode, bool expect)
#define M(path, c, e) \ #define M(path, c, e) \
TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e)) TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
int test__kmod_path__parse(void) int test__kmod_path__parse(int subtest __maybe_unused)
{ {
/* path alloc_name alloc_ext kmod comp name ext */ /* path alloc_name alloc_ext kmod comp name ext */
T("/xxxx/xxxx/x-x.ko", true , true , true, false, "[x_x]", NULL); T("/xxxx/xxxx/x-x.ko", true , true , true, false, "[x_x]", NULL);
......
...@@ -44,13 +44,17 @@ static struct { ...@@ -44,13 +44,17 @@ static struct {
.source = test_llvm__bpf_test_kbuild_prog, .source = test_llvm__bpf_test_kbuild_prog,
.desc = "Test kbuild searching", .desc = "Test kbuild searching",
}, },
[LLVM_TESTCASE_BPF_PROLOGUE] = {
.source = test_llvm__bpf_test_prologue_prog,
.desc = "Compile source for BPF prologue generation test",
},
}; };
int int
test_llvm__fetch_bpf_obj(void **p_obj_buf, test_llvm__fetch_bpf_obj(void **p_obj_buf,
size_t *p_obj_buf_sz, size_t *p_obj_buf_sz,
enum test_llvm__testcase index, enum test_llvm__testcase idx,
bool force) bool force)
{ {
const char *source; const char *source;
...@@ -59,11 +63,11 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf, ...@@ -59,11 +63,11 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
char *tmpl_new = NULL, *clang_opt_new = NULL; char *tmpl_new = NULL, *clang_opt_new = NULL;
int err, old_verbose, ret = TEST_FAIL; int err, old_verbose, ret = TEST_FAIL;
if (index >= __LLVM_TESTCASE_MAX) if (idx >= __LLVM_TESTCASE_MAX)
return TEST_FAIL; return TEST_FAIL;
source = bpf_source_table[index].source; source = bpf_source_table[idx].source;
desc = bpf_source_table[index].desc; desc = bpf_source_table[idx].desc;
perf_config(perf_config_cb, NULL); perf_config(perf_config_cb, NULL);
...@@ -127,44 +131,39 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf, ...@@ -127,44 +131,39 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
return ret; return ret;
} }
int test__llvm(void) int test__llvm(int subtest)
{ {
enum test_llvm__testcase i;
for (i = 0; i < __LLVM_TESTCASE_MAX; i++) {
int ret; int ret;
void *obj_buf = NULL; void *obj_buf = NULL;
size_t obj_buf_sz = 0; size_t obj_buf_sz = 0;
if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
return TEST_FAIL;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz, ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
i, false); subtest, false);
if (ret == TEST_OK) { if (ret == TEST_OK) {
ret = test__bpf_parsing(obj_buf, obj_buf_sz); ret = test__bpf_parsing(obj_buf, obj_buf_sz);
if (ret != TEST_OK) if (ret != TEST_OK) {
pr_debug("Failed to parse test case '%s'\n", pr_debug("Failed to parse test case '%s'\n",
bpf_source_table[i].desc); bpf_source_table[subtest].desc);
}
} }
free(obj_buf); free(obj_buf);
switch (ret) { return ret;
case TEST_SKIP: }
return TEST_SKIP;
case TEST_OK: int test__llvm_subtest_get_nr(void)
break; {
default: return __LLVM_TESTCASE_MAX;
/* }
* Test 0 is the basic LLVM test. If test 0
* fail, the basic LLVM support not functional const char *test__llvm_subtest_get_desc(int subtest)
* so the whole test should fail. If other test {
* case fail, it can be fixed by adjusting if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
* config so don't report error. return NULL;
*/
if (i == 0) return bpf_source_table[subtest].desc;
return TEST_FAIL;
else
return TEST_SKIP;
}
}
return TEST_OK;
} }
...@@ -6,10 +6,12 @@ ...@@ -6,10 +6,12 @@
extern const char test_llvm__bpf_base_prog[]; extern const char test_llvm__bpf_base_prog[];
extern const char test_llvm__bpf_test_kbuild_prog[]; extern const char test_llvm__bpf_test_kbuild_prog[];
extern const char test_llvm__bpf_test_prologue_prog[];
enum test_llvm__testcase { enum test_llvm__testcase {
LLVM_TESTCASE_BASE, LLVM_TESTCASE_BASE,
LLVM_TESTCASE_KBUILD, LLVM_TESTCASE_KBUILD,
LLVM_TESTCASE_BPF_PROLOGUE,
__LLVM_TESTCASE_MAX, __LLVM_TESTCASE_MAX,
}; };
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
* Then it checks if the number of syscalls reported as perf events by * Then it checks if the number of syscalls reported as perf events by
* the kernel corresponds to the number of syscalls made. * the kernel corresponds to the number of syscalls made.
*/ */
int test__basic_mmap(void) int test__basic_mmap(int subtest __maybe_unused)
{ {
int err = -1; int err = -1;
union perf_event *event; union perf_event *event;
......
...@@ -221,7 +221,7 @@ static int mmap_events(synth_cb synth) ...@@ -221,7 +221,7 @@ static int mmap_events(synth_cb synth)
* *
* by using all thread objects. * by using all thread objects.
*/ */
int test__mmap_thread_lookup(void) int test__mmap_thread_lookup(int subtest __maybe_unused)
{ {
/* perf_event__synthesize_threads synthesize */ /* perf_event__synthesize_threads synthesize */
TEST_ASSERT_VAL("failed with sythesizing all", TEST_ASSERT_VAL("failed with sythesizing all",
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include "debug.h" #include "debug.h"
#include "stat.h" #include "stat.h"
int test__openat_syscall_event_on_all_cpus(void) int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{ {
int err = -1, fd, cpu; int err = -1, fd, cpu;
struct cpu_map *cpus; struct cpu_map *cpus;
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include "tests.h" #include "tests.h"
#include "debug.h" #include "debug.h"
int test__syscall_openat_tp_fields(void) int test__syscall_openat_tp_fields(int subtest __maybe_unused)
{ {
struct record_opts opts = { struct record_opts opts = {
.target = { .target = {
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#include "debug.h" #include "debug.h"
#include "tests.h" #include "tests.h"
int test__openat_syscall_event(void) int test__openat_syscall_event(int subtest __maybe_unused)
{ {
int err = -1, fd; int err = -1, fd;
struct perf_evsel *evsel; struct perf_evsel *evsel;
......
...@@ -1765,7 +1765,7 @@ static void debug_warn(const char *warn, va_list params) ...@@ -1765,7 +1765,7 @@ static void debug_warn(const char *warn, va_list params)
fprintf(stderr, " Warning: %s\n", msg); fprintf(stderr, " Warning: %s\n", msg);
} }
int test__parse_events(void) int test__parse_events(int subtest __maybe_unused)
{ {
int ret1, ret2 = 0; int ret1, ret2 = 0;
......
...@@ -67,7 +67,7 @@ struct test_attr_event { ...@@ -67,7 +67,7 @@ struct test_attr_event {
* *
* Return: %0 on success, %-1 if the test fails. * Return: %0 on success, %-1 if the test fails.
*/ */
int test__parse_no_sample_id_all(void) int test__parse_no_sample_id_all(int subtest __maybe_unused)
{ {
int err; int err;
......
...@@ -32,7 +32,7 @@ static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) ...@@ -32,7 +32,7 @@ static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
return cpu; return cpu;
} }
int test__PERF_RECORD(void) int test__PERF_RECORD(int subtest __maybe_unused)
{ {
struct record_opts opts = { struct record_opts opts = {
.target = { .target = {
......
...@@ -133,7 +133,7 @@ static struct list_head *test_terms_list(void) ...@@ -133,7 +133,7 @@ static struct list_head *test_terms_list(void)
return &terms; return &terms;
} }
int test__pmu(void) int test__pmu(int subtest __maybe_unused)
{ {
char *format = test_format_dir_get(); char *format = test_format_dir_get();
LIST_HEAD(formats); LIST_HEAD(formats);
......
...@@ -4,11 +4,12 @@ ...@@ -4,11 +4,12 @@
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <linux/compiler.h>
#include "tests.h" #include "tests.h"
extern int verbose; extern int verbose;
int test__python_use(void) int test__python_use(int subtest __maybe_unused)
{ {
char *cmd; char *cmd;
int ret; int ret;
......
...@@ -290,7 +290,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format) ...@@ -290,7 +290,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
* checks sample format bits separately and together. If the test passes %0 is * checks sample format bits separately and together. If the test passes %0 is
* returned, otherwise %-1 is returned. * returned, otherwise %-1 is returned.
*/ */
int test__sample_parsing(void) int test__sample_parsing(int subtest __maybe_unused)
{ {
const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15}; const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
u64 sample_type; u64 sample_type;
......
...@@ -122,7 +122,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) ...@@ -122,7 +122,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
return err; return err;
} }
int test__sw_clock_freq(void) int test__sw_clock_freq(int subtest __maybe_unused)
{ {
int ret; int ret;
......
...@@ -305,7 +305,7 @@ static int process_events(struct perf_evlist *evlist, ...@@ -305,7 +305,7 @@ static int process_events(struct perf_evlist *evlist,
* evsel->system_wide and evsel->tracking flags (respectively) with other events * evsel->system_wide and evsel->tracking flags (respectively) with other events
* sometimes enabled or disabled. * sometimes enabled or disabled.
*/ */
int test__switch_tracking(void) int test__switch_tracking(int subtest __maybe_unused)
{ {
const char *sched_switch = "sched:sched_switch"; const char *sched_switch = "sched:sched_switch";
struct switch_tracking switch_tracking = { .tids = NULL, }; struct switch_tracking switch_tracking = { .tids = NULL, };
......
...@@ -31,7 +31,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused, ...@@ -31,7 +31,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
* if the number of exit event reported by the kernel is 1 or not * if the number of exit event reported by the kernel is 1 or not
* in order to check the kernel returns correct number of event. * in order to check the kernel returns correct number of event.
*/ */
int test__task_exit(void) int test__task_exit(int subtest __maybe_unused)
{ {
int err = -1; int err = -1;
union perf_event *event; union perf_event *event;
......
#ifndef TESTS_H #ifndef TESTS_H
#define TESTS_H #define TESTS_H
#include <stdbool.h>
#define TEST_ASSERT_VAL(text, cond) \ #define TEST_ASSERT_VAL(text, cond) \
do { \ do { \
if (!(cond)) { \ if (!(cond)) { \
...@@ -26,48 +28,57 @@ enum { ...@@ -26,48 +28,57 @@ enum {
struct test { struct test {
const char *desc; const char *desc;
int (*func)(void); int (*func)(int subtest);
struct {
bool skip_if_fail;
int (*get_nr)(void);
const char *(*get_desc)(int subtest);
} subtest;
}; };
/* Tests */ /* Tests */
int test__vmlinux_matches_kallsyms(void); int test__vmlinux_matches_kallsyms(int subtest);
int test__openat_syscall_event(void); int test__openat_syscall_event(int subtest);
int test__openat_syscall_event_on_all_cpus(void); int test__openat_syscall_event_on_all_cpus(int subtest);
int test__basic_mmap(void); int test__basic_mmap(int subtest);
int test__PERF_RECORD(void); int test__PERF_RECORD(int subtest);
int test__perf_evsel__roundtrip_name_test(void); int test__perf_evsel__roundtrip_name_test(int subtest);
int test__perf_evsel__tp_sched_test(void); int test__perf_evsel__tp_sched_test(int subtest);
int test__syscall_openat_tp_fields(void); int test__syscall_openat_tp_fields(int subtest);
int test__pmu(void); int test__pmu(int subtest);
int test__attr(void); int test__attr(int subtest);
int test__dso_data(void); int test__dso_data(int subtest);
int test__dso_data_cache(void); int test__dso_data_cache(int subtest);
int test__dso_data_reopen(void); int test__dso_data_reopen(int subtest);
int test__parse_events(void); int test__parse_events(int subtest);
int test__hists_link(void); int test__hists_link(int subtest);
int test__python_use(void); int test__python_use(int subtest);
int test__bp_signal(void); int test__bp_signal(int subtest);
int test__bp_signal_overflow(void); int test__bp_signal_overflow(int subtest);
int test__task_exit(void); int test__task_exit(int subtest);
int test__sw_clock_freq(void); int test__sw_clock_freq(int subtest);
int test__code_reading(void); int test__code_reading(int subtest);
int test__sample_parsing(void); int test__sample_parsing(int subtest);
int test__keep_tracking(void); int test__keep_tracking(int subtest);
int test__parse_no_sample_id_all(void); int test__parse_no_sample_id_all(int subtest);
int test__dwarf_unwind(void); int test__dwarf_unwind(int subtest);
int test__hists_filter(void); int test__hists_filter(int subtest);
int test__mmap_thread_lookup(void); int test__mmap_thread_lookup(int subtest);
int test__thread_mg_share(void); int test__thread_mg_share(int subtest);
int test__hists_output(void); int test__hists_output(int subtest);
int test__hists_cumulate(void); int test__hists_cumulate(int subtest);
int test__switch_tracking(void); int test__switch_tracking(int subtest);
int test__fdarray__filter(void); int test__fdarray__filter(int subtest);
int test__fdarray__add(void); int test__fdarray__add(int subtest);
int test__kmod_path__parse(void); int test__kmod_path__parse(int subtest);
int test__thread_map(void); int test__thread_map(int subtest);
int test__llvm(void); int test__llvm(int subtest);
int test__bpf(void); const char *test__llvm_subtest_get_desc(int subtest);
int test_session_topology(void); int test__llvm_subtest_get_nr(void);
int test__bpf(int subtest);
const char *test__bpf_subtest_get_desc(int subtest);
int test__bpf_subtest_get_nr(void);
int test_session_topology(int subtest);
#if defined(__arm__) || defined(__aarch64__) #if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT #ifdef HAVE_DWARF_UNWIND_SUPPORT
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include "thread_map.h" #include "thread_map.h"
#include "debug.h" #include "debug.h"
int test__thread_map(void) int test__thread_map(int subtest __maybe_unused)
{ {
struct thread_map *map; struct thread_map *map;
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include "map.h" #include "map.h"
#include "debug.h" #include "debug.h"
int test__thread_mg_share(void) int test__thread_mg_share(int subtest __maybe_unused)
{ {
struct machines machines; struct machines machines;
struct machine *machine; struct machine *machine;
......
...@@ -84,7 +84,7 @@ static int check_cpu_topology(char *path, struct cpu_map *map) ...@@ -84,7 +84,7 @@ static int check_cpu_topology(char *path, struct cpu_map *map)
return 0; return 0;
} }
int test_session_topology(void) int test_session_topology(int subtest __maybe_unused)
{ {
char path[PATH_MAX]; char path[PATH_MAX];
struct cpu_map *map; struct cpu_map *map;
......
...@@ -18,7 +18,7 @@ static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, ...@@ -18,7 +18,7 @@ static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x)) #define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
int test__vmlinux_matches_kallsyms(void) int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
{ {
int err = -1; int err = -1;
struct rb_node *nd; struct rb_node *nd;
......
...@@ -89,7 +89,7 @@ void perf_gtk__init_hpp(void) ...@@ -89,7 +89,7 @@ void perf_gtk__init_hpp(void)
perf_gtk__hpp_color_overhead_acc; perf_gtk__hpp_color_overhead_acc;
} }
static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store, static void perf_gtk__add_callchain_flat(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total) GtkTreeIter *parent, int col, u64 total)
{ {
struct rb_node *nd; struct rb_node *nd;
...@@ -100,13 +100,132 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store, ...@@ -100,13 +100,132 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
struct callchain_list *chain; struct callchain_list *chain;
GtkTreeIter iter, new_parent; GtkTreeIter iter, new_parent;
bool need_new_parent; bool need_new_parent;
double percent;
u64 hits, child_total;
node = rb_entry(nd, struct callchain_node, rb_node); node = rb_entry(nd, struct callchain_node, rb_node);
hits = callchain_cumul_hits(node); new_parent = *parent;
percent = 100.0 * hits / total; need_new_parent = !has_single_node;
callchain_node__make_parent_list(node);
list_for_each_entry(chain, &node->parent_val, list) {
char buf[128];
gtk_tree_store_append(store, &iter, &new_parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false);
gtk_tree_store_set(store, &iter, col, buf, -1);
if (need_new_parent) {
/*
* Only show the top-most symbol in a callchain
* if it's not the only callchain.
*/
new_parent = iter;
need_new_parent = false;
}
}
list_for_each_entry(chain, &node->val, list) {
char buf[128];
gtk_tree_store_append(store, &iter, &new_parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false);
gtk_tree_store_set(store, &iter, col, buf, -1);
if (need_new_parent) {
/*
* Only show the top-most symbol in a callchain
* if it's not the only callchain.
*/
new_parent = iter;
need_new_parent = false;
}
}
}
}
static void perf_gtk__add_callchain_folded(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
struct rb_node *nd;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node;
struct callchain_list *chain;
GtkTreeIter iter;
char buf[64];
char *str, *str_alloc = NULL;
bool first = true;
node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__make_parent_list(node);
list_for_each_entry(chain, &node->parent_val, list) {
char name[1024];
callchain_list__sym_name(chain, name, sizeof(name), false);
if (asprintf(&str, "%s%s%s",
first ? "" : str_alloc,
first ? "" : symbol_conf.field_sep ?: "; ",
name) < 0)
return;
first = false;
free(str_alloc);
str_alloc = str;
}
list_for_each_entry(chain, &node->val, list) {
char name[1024];
callchain_list__sym_name(chain, name, sizeof(name), false);
if (asprintf(&str, "%s%s%s",
first ? "" : str_alloc,
first ? "" : symbol_conf.field_sep ?: "; ",
name) < 0)
return;
first = false;
free(str_alloc);
str_alloc = str;
}
gtk_tree_store_append(store, &iter, parent);
callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1);
gtk_tree_store_set(store, &iter, col, str, -1);
free(str_alloc);
}
}
static void perf_gtk__add_callchain_graph(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
struct rb_node *nd;
bool has_single_node = (rb_first(root) == rb_last(root));
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node;
struct callchain_list *chain;
GtkTreeIter iter, new_parent;
bool need_new_parent;
u64 child_total;
node = rb_entry(nd, struct callchain_node, rb_node);
new_parent = *parent; new_parent = *parent;
need_new_parent = !has_single_node && (node->val_nr > 1); need_new_parent = !has_single_node && (node->val_nr > 1);
...@@ -116,7 +235,7 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store, ...@@ -116,7 +235,7 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
gtk_tree_store_append(store, &iter, &new_parent); gtk_tree_store_append(store, &iter, &new_parent);
scnprintf(buf, sizeof(buf), "%5.2f%%", percent); callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
gtk_tree_store_set(store, &iter, 0, buf, -1); gtk_tree_store_set(store, &iter, 0, buf, -1);
callchain_list__sym_name(chain, buf, sizeof(buf), false); callchain_list__sym_name(chain, buf, sizeof(buf), false);
...@@ -138,11 +257,22 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store, ...@@ -138,11 +257,22 @@ static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
child_total = total; child_total = total;
/* Now 'iter' contains info of the last callchain_list */ /* Now 'iter' contains info of the last callchain_list */
perf_gtk__add_callchain(&node->rb_root, store, &iter, col, perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
child_total); child_total);
} }
} }
static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
GtkTreeIter *parent, int col, u64 total)
{
if (callchain_param.mode == CHAIN_FLAT)
perf_gtk__add_callchain_flat(root, store, parent, col, total);
else if (callchain_param.mode == CHAIN_FOLDED)
perf_gtk__add_callchain_folded(root, store, parent, col, total);
else
perf_gtk__add_callchain_graph(root, store, parent, col, total);
}
static void on_row_activated(GtkTreeView *view, GtkTreePath *path, static void on_row_activated(GtkTreeView *view, GtkTreePath *path,
GtkTreeViewColumn *col __maybe_unused, GtkTreeViewColumn *col __maybe_unused,
gpointer user_data __maybe_unused) gpointer user_data __maybe_unused)
......
...@@ -34,10 +34,10 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, ...@@ -34,10 +34,10 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
return ret; return ret;
} }
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
struct callchain_list *chain,
int depth, int depth_mask, int period, int depth, int depth_mask, int period,
u64 total_samples, u64 hits, u64 total_samples, int left_margin)
int left_margin)
{ {
int i; int i;
size_t ret = 0; size_t ret = 0;
...@@ -50,10 +50,9 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, ...@@ -50,10 +50,9 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
else else
ret += fprintf(fp, " "); ret += fprintf(fp, " ");
if (!period && i == depth - 1) { if (!period && i == depth - 1) {
double percent; ret += fprintf(fp, "--");
ret += callchain_node__fprintf_value(node, fp, total_samples);
percent = hits * 100.0 / total_samples; ret += fprintf(fp, "--");
ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
} else } else
ret += fprintf(fp, "%s", " "); ret += fprintf(fp, "%s", " ");
} }
...@@ -82,13 +81,14 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, ...@@ -82,13 +81,14 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
int depth_mask, int left_margin) int depth_mask, int left_margin)
{ {
struct rb_node *node, *next; struct rb_node *node, *next;
struct callchain_node *child; struct callchain_node *child = NULL;
struct callchain_list *chain; struct callchain_list *chain;
int new_depth_mask = depth_mask; int new_depth_mask = depth_mask;
u64 remaining; u64 remaining;
size_t ret = 0; size_t ret = 0;
int i; int i;
uint entries_printed = 0; uint entries_printed = 0;
int cumul_count = 0;
remaining = total_samples; remaining = total_samples;
...@@ -100,6 +100,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, ...@@ -100,6 +100,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
child = rb_entry(node, struct callchain_node, rb_node); child = rb_entry(node, struct callchain_node, rb_node);
cumul = callchain_cumul_hits(child); cumul = callchain_cumul_hits(child);
remaining -= cumul; remaining -= cumul;
cumul_count += callchain_cumul_counts(child);
/* /*
* The depth mask manages the output of pipes that show * The depth mask manages the output of pipes that show
...@@ -120,10 +121,9 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, ...@@ -120,10 +121,9 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
left_margin); left_margin);
i = 0; i = 0;
list_for_each_entry(chain, &child->val, list) { list_for_each_entry(chain, &child->val, list) {
ret += ipchain__fprintf_graph(fp, chain, depth, ret += ipchain__fprintf_graph(fp, child, chain, depth,
new_depth_mask, i++, new_depth_mask, i++,
total_samples, total_samples,
cumul,
left_margin); left_margin);
} }
...@@ -143,14 +143,23 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, ...@@ -143,14 +143,23 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
if (callchain_param.mode == CHAIN_GRAPH_REL && if (callchain_param.mode == CHAIN_GRAPH_REL &&
remaining && remaining != total_samples) { remaining && remaining != total_samples) {
struct callchain_node rem_node = {
.hit = remaining,
};
if (!rem_sq_bracket) if (!rem_sq_bracket)
return ret; return ret;
if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
rem_node.count = child->parent->children_count - cumul_count;
if (rem_node.count <= 0)
return ret;
}
new_depth_mask &= ~(1 << (depth - 1)); new_depth_mask &= ~(1 << (depth - 1));
ret += ipchain__fprintf_graph(fp, &rem_hits, depth, ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
new_depth_mask, 0, total_samples, new_depth_mask, 0, total_samples,
remaining, left_margin); left_margin);
} }
return ret; return ret;
...@@ -243,12 +252,11 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree, ...@@ -243,12 +252,11 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
struct rb_node *rb_node = rb_first(tree); struct rb_node *rb_node = rb_first(tree);
while (rb_node) { while (rb_node) {
double percent;
chain = rb_entry(rb_node, struct callchain_node, rb_node); chain = rb_entry(rb_node, struct callchain_node, rb_node);
percent = chain->hit * 100.0 / total_samples;
ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); ret += fprintf(fp, " ");
ret += callchain_node__fprintf_value(chain, fp, total_samples);
ret += fprintf(fp, "\n");
ret += __callchain__fprintf_flat(fp, chain, total_samples); ret += __callchain__fprintf_flat(fp, chain, total_samples);
ret += fprintf(fp, "\n"); ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit) if (++entries_printed == callchain_param.print_limit)
...@@ -260,6 +268,57 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree, ...@@ -260,6 +268,57 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
return ret; return ret;
} }
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
const char *sep = symbol_conf.field_sep ?: ";";
struct callchain_list *chain;
size_t ret = 0;
char bf[1024];
bool first;
if (!node)
return 0;
ret += __callchain__fprintf_folded(fp, node->parent);
first = (ret == 0);
list_for_each_entry(chain, &node->val, list) {
if (chain->ip >= PERF_CONTEXT_MAX)
continue;
ret += fprintf(fp, "%s%s", first ? "" : sep,
callchain_list__sym_name(chain,
bf, sizeof(bf), false));
first = false;
}
return ret;
}
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
u64 total_samples)
{
size_t ret = 0;
u32 entries_printed = 0;
struct callchain_node *chain;
struct rb_node *rb_node = rb_first(tree);
while (rb_node) {
chain = rb_entry(rb_node, struct callchain_node, rb_node);
ret += callchain_node__fprintf_value(chain, fp, total_samples);
ret += fprintf(fp, " ");
ret += __callchain__fprintf_folded(fp, chain);
ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
break;
rb_node = rb_next(rb_node);
}
return ret;
}
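
  For reference, each line produced by callchain__fprintf_folded() above is the node's value, a space, then the frame names joined by symbol_conf.field_sep (";" when unset), which resembles the "folded" input that flame-graph style post-processing consumes. A minimal standalone sketch of that line format (the symbol names and the count are invented for illustration):

  #include <stdio.h>

  int main(void)
  {
          /* sep mirrors 'symbol_conf.field_sep ?: ";"' in the code above */
          const char *sep = ";";
          const char *frames[] = { "start_kernel", "rest_init", "cpu_startup_entry" };
          unsigned long long value = 42;  /* whatever the chosen CCVAL mode yields */
          unsigned int i;

          printf("%llu ", value);
          for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
                  printf("%s%s", i ? sep : "", frames[i]);
          putchar('\n');  /* prints: 42 start_kernel;rest_init;cpu_startup_entry */
          return 0;
  }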
static size_t hist_entry_callchain__fprintf(struct hist_entry *he, static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
u64 total_samples, int left_margin, u64 total_samples, int left_margin,
FILE *fp) FILE *fp)
...@@ -278,6 +337,9 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he, ...@@ -278,6 +337,9 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
case CHAIN_FLAT: case CHAIN_FLAT:
return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
break; break;
case CHAIN_FOLDED:
return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
break;
case CHAIN_NONE: case CHAIN_NONE:
break; break;
default: default:
......
...@@ -21,6 +21,7 @@ libperf-y += parse-events.o ...@@ -21,6 +21,7 @@ libperf-y += parse-events.o
libperf-y += perf_regs.o libperf-y += perf_regs.o
libperf-y += path.o libperf-y += path.o
libperf-y += rbtree.o libperf-y += rbtree.o
libperf-y += libstring.o
libperf-y += bitmap.o libperf-y += bitmap.o
libperf-y += hweight.o libperf-y += hweight.o
libperf-y += run-command.o libperf-y += run-command.o
...@@ -88,6 +89,7 @@ libperf-y += parse-branch-options.o ...@@ -88,6 +89,7 @@ libperf-y += parse-branch-options.o
libperf-y += parse-regs-options.o libperf-y += parse-regs-options.o
libperf-$(CONFIG_LIBBPF) += bpf-loader.o libperf-$(CONFIG_LIBBPF) += bpf-loader.o
libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
libperf-$(CONFIG_LIBELF) += symbol-elf.o libperf-$(CONFIG_LIBELF) += symbol-elf.o
libperf-$(CONFIG_LIBELF) += probe-file.o libperf-$(CONFIG_LIBELF) += probe-file.o
libperf-$(CONFIG_LIBELF) += probe-event.o libperf-$(CONFIG_LIBELF) += probe-event.o
...@@ -138,6 +140,7 @@ $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c ...@@ -138,6 +140,7 @@ $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_parse-events.o += -Wno-redundant-decls CFLAGS_parse-events.o += -Wno-redundant-decls
...@@ -153,6 +156,10 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE ...@@ -153,6 +156,10 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
$(call rule_mkdir) $(call rule_mkdir)
$(call if_changed_dep,cc_o_c) $(call if_changed_dep,cc_o_c)
$(OUTPUT)util/libstring.o: ../lib/string.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE $(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
$(call rule_mkdir) $(call rule_mkdir)
$(call if_changed_dep,cc_o_c) $(call if_changed_dep,cc_o_c)
...@@ -20,6 +20,10 @@ enum bpf_loader_errno { ...@@ -20,6 +20,10 @@ enum bpf_loader_errno {
BPF_LOADER_ERRNO__EVENTNAME, /* Event name is missing */ BPF_LOADER_ERRNO__EVENTNAME, /* Event name is missing */
BPF_LOADER_ERRNO__INTERNAL, /* BPF loader internal error */ BPF_LOADER_ERRNO__INTERNAL, /* BPF loader internal error */
BPF_LOADER_ERRNO__COMPILE, /* Error when compiling BPF scriptlet */ BPF_LOADER_ERRNO__COMPILE, /* Error when compiling BPF scriptlet */
BPF_LOADER_ERRNO__CONFIG_TERM, /* Invalid config term in config term */
BPF_LOADER_ERRNO__PROLOGUE, /* Failed to generate prologue */
BPF_LOADER_ERRNO__PROLOGUE2BIG, /* Prologue too big for program */
BPF_LOADER_ERRNO__PROLOGUEOOB, /* Offset out of bound for prologue */
__BPF_LOADER_ERRNO__END, __BPF_LOADER_ERRNO__END,
}; };
......
/*
* Copyright (C) 2015, He Kuang <hekuang@huawei.com>
* Copyright (C) 2015, Huawei Inc.
*/
#ifndef __BPF_PROLOGUE_H
#define __BPF_PROLOGUE_H
#include <linux/compiler.h>
#include <linux/filter.h>
#include "probe-event.h"
#define BPF_PROLOGUE_MAX_ARGS 3
#define BPF_PROLOGUE_START_ARG_REG BPF_REG_3
#define BPF_PROLOGUE_FETCH_RESULT_REG BPF_REG_2
#ifdef HAVE_BPF_PROLOGUE
int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
struct bpf_insn *new_prog, size_t *new_cnt,
size_t cnt_space);
#else
static inline int
bpf__gen_prologue(struct probe_trace_arg *args __maybe_unused,
int nargs __maybe_unused,
struct bpf_insn *new_prog __maybe_unused,
size_t *new_cnt,
size_t cnt_space __maybe_unused)
{
if (!new_cnt)
return -EINVAL;
*new_cnt = 0;
return -ENOTSUP;
}
#endif
#endif /* __BPF_PROLOGUE_H */
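
  A hedged caller sketch for the interface above, assuming it is compiled inside the perf tree: the args/nargs values would come from 'perf probe' argument parsing and are not shown, the helper name and the clamping are invented for illustration, and bpf__gen_prologue() is only assumed to return 0 or a negative error as the fallback stub suggests.

  #include <stddef.h>
  #include "bpf-prologue.h"

  /* hypothetical helper: generate a prologue into 'buf' and report in
   * '*cnt' how many instructions it used, so the caller can prepend it
   * to the compiled BPF program */
  static int gen_probe_prologue(struct probe_trace_arg *args, int nargs,
                                struct bpf_insn *buf, size_t space,
                                size_t *cnt)
  {
          if (nargs > BPF_PROLOGUE_MAX_ARGS)
                  nargs = BPF_PROLOGUE_MAX_ARGS;

          /* negative on failure, e.g. -ENOTSUP when perf was built
           * without HAVE_BPF_PROLOGUE */
          return bpf__gen_prologue(args, nargs, buf, cnt, space);
  }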
...@@ -44,6 +44,10 @@ static int parse_callchain_mode(const char *value) ...@@ -44,6 +44,10 @@ static int parse_callchain_mode(const char *value)
callchain_param.mode = CHAIN_GRAPH_REL; callchain_param.mode = CHAIN_GRAPH_REL;
return 0; return 0;
} }
if (!strncmp(value, "folded", strlen(value))) {
callchain_param.mode = CHAIN_FOLDED;
return 0;
}
return -1; return -1;
} }
...@@ -79,6 +83,23 @@ static int parse_callchain_sort_key(const char *value) ...@@ -79,6 +83,23 @@ static int parse_callchain_sort_key(const char *value)
return -1; return -1;
} }
static int parse_callchain_value(const char *value)
{
if (!strncmp(value, "percent", strlen(value))) {
callchain_param.value = CCVAL_PERCENT;
return 0;
}
if (!strncmp(value, "period", strlen(value))) {
callchain_param.value = CCVAL_PERIOD;
return 0;
}
if (!strncmp(value, "count", strlen(value))) {
callchain_param.value = CCVAL_COUNT;
return 0;
}
return -1;
}
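
  Like the existing mode/order parsers, parse_callchain_value() matches with strncmp(value, keyword, strlen(value)), so any leading prefix of a keyword is accepted from the command line (for instance "cou" would select count; the exact report option spelling, e.g. "-g folded,count", is not shown in this hunk). A standalone illustration of that matching rule, outside of perf:

  #include <stdio.h>
  #include <string.h>

  /* same prefix rule as the parsers above: the user's token only
   * needs to be a leading prefix of the keyword */
  static int matches(const char *value, const char *keyword)
  {
          return !strncmp(value, keyword, strlen(value));
  }

  int main(void)
  {
          printf("%d\n", matches("cou", "count"));     /* 1: prefix matches */
          printf("%d\n", matches("period", "period")); /* 1: exact match */
          printf("%d\n", matches("counts", "count"));  /* 0: longer than keyword */
          return 0;
  }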
static int static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt) __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{ {
...@@ -102,7 +123,8 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt) ...@@ -102,7 +123,8 @@ __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
if (!parse_callchain_mode(tok) || if (!parse_callchain_mode(tok) ||
!parse_callchain_order(tok) || !parse_callchain_order(tok) ||
!parse_callchain_sort_key(tok)) { !parse_callchain_sort_key(tok) ||
!parse_callchain_value(tok)) {
/* parsing ok - move on to the next */ /* parsing ok - move on to the next */
try_stack_size = false; try_stack_size = false;
goto next; goto next;
...@@ -218,6 +240,7 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, ...@@ -218,6 +240,7 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
switch (mode) { switch (mode) {
case CHAIN_FLAT: case CHAIN_FLAT:
case CHAIN_FOLDED:
if (rnode->hit < chain->hit) if (rnode->hit < chain->hit)
p = &(*p)->rb_left; p = &(*p)->rb_left;
else else
...@@ -338,6 +361,7 @@ int callchain_register_param(struct callchain_param *param) ...@@ -338,6 +361,7 @@ int callchain_register_param(struct callchain_param *param)
param->sort = sort_chain_graph_rel; param->sort = sort_chain_graph_rel;
break; break;
case CHAIN_FLAT: case CHAIN_FLAT:
case CHAIN_FOLDED:
param->sort = sort_chain_flat; param->sort = sort_chain_flat;
break; break;
case CHAIN_NONE: case CHAIN_NONE:
...@@ -363,6 +387,7 @@ create_child(struct callchain_node *parent, bool inherit_children) ...@@ -363,6 +387,7 @@ create_child(struct callchain_node *parent, bool inherit_children)
} }
new->parent = parent; new->parent = parent;
INIT_LIST_HEAD(&new->val); INIT_LIST_HEAD(&new->val);
INIT_LIST_HEAD(&new->parent_val);
if (inherit_children) { if (inherit_children) {
struct rb_node *n; struct rb_node *n;
...@@ -431,6 +456,8 @@ add_child(struct callchain_node *parent, ...@@ -431,6 +456,8 @@ add_child(struct callchain_node *parent,
new->children_hit = 0; new->children_hit = 0;
new->hit = period; new->hit = period;
new->children_count = 0;
new->count = 1;
return new; return new;
} }
...@@ -478,6 +505,9 @@ split_add_child(struct callchain_node *parent, ...@@ -478,6 +505,9 @@ split_add_child(struct callchain_node *parent,
parent->children_hit = callchain_cumul_hits(new); parent->children_hit = callchain_cumul_hits(new);
new->val_nr = parent->val_nr - idx_local; new->val_nr = parent->val_nr - idx_local;
parent->val_nr = idx_local; parent->val_nr = idx_local;
new->count = parent->count;
new->children_count = parent->children_count;
parent->children_count = callchain_cumul_counts(new);
/* create a new child for the new branch if any */ /* create a new child for the new branch if any */
if (idx_total < cursor->nr) { if (idx_total < cursor->nr) {
...@@ -488,6 +518,8 @@ split_add_child(struct callchain_node *parent, ...@@ -488,6 +518,8 @@ split_add_child(struct callchain_node *parent,
parent->hit = 0; parent->hit = 0;
parent->children_hit += period; parent->children_hit += period;
parent->count = 0;
parent->children_count += 1;
node = callchain_cursor_current(cursor); node = callchain_cursor_current(cursor);
new = add_child(parent, cursor, period); new = add_child(parent, cursor, period);
...@@ -510,6 +542,7 @@ split_add_child(struct callchain_node *parent, ...@@ -510,6 +542,7 @@ split_add_child(struct callchain_node *parent,
rb_insert_color(&new->rb_node_in, &parent->rb_root_in); rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
} else { } else {
parent->hit = period; parent->hit = period;
parent->count = 1;
} }
} }
...@@ -556,6 +589,7 @@ append_chain_children(struct callchain_node *root, ...@@ -556,6 +589,7 @@ append_chain_children(struct callchain_node *root,
inc_children_hit: inc_children_hit:
root->children_hit += period; root->children_hit += period;
root->children_count++;
} }
static int static int
...@@ -608,6 +642,7 @@ append_chain(struct callchain_node *root, ...@@ -608,6 +642,7 @@ append_chain(struct callchain_node *root,
/* we match 100% of the path, increment the hit */ /* we match 100% of the path, increment the hit */
if (matches == root->val_nr && cursor->pos == cursor->nr) { if (matches == root->val_nr && cursor->pos == cursor->nr) {
root->hit += period; root->hit += period;
root->count++;
return 0; return 0;
} }
...@@ -799,12 +834,72 @@ char *callchain_list__sym_name(struct callchain_list *cl, ...@@ -799,12 +834,72 @@ char *callchain_list__sym_name(struct callchain_list *cl,
return bf; return bf;
} }
char *callchain_node__scnprintf_value(struct callchain_node *node,
char *bf, size_t bfsize, u64 total)
{
double percent = 0.0;
u64 period = callchain_cumul_hits(node);
unsigned count = callchain_cumul_counts(node);
if (callchain_param.mode == CHAIN_FOLDED) {
period = node->hit;
count = node->count;
}
switch (callchain_param.value) {
case CCVAL_PERIOD:
scnprintf(bf, bfsize, "%"PRIu64, period);
break;
case CCVAL_COUNT:
scnprintf(bf, bfsize, "%u", count);
break;
case CCVAL_PERCENT:
default:
if (total)
percent = period * 100.0 / total;
scnprintf(bf, bfsize, "%.2f%%", percent);
break;
}
return bf;
}
int callchain_node__fprintf_value(struct callchain_node *node,
FILE *fp, u64 total)
{
double percent = 0.0;
u64 period = callchain_cumul_hits(node);
unsigned count = callchain_cumul_counts(node);
if (callchain_param.mode == CHAIN_FOLDED) {
period = node->hit;
count = node->count;
}
switch (callchain_param.value) {
case CCVAL_PERIOD:
return fprintf(fp, "%"PRIu64, period);
case CCVAL_COUNT:
return fprintf(fp, "%u", count);
case CCVAL_PERCENT:
default:
if (total)
percent = period * 100.0 / total;
return percent_color_fprintf(fp, "%.2f%%", percent);
}
return 0;
}
static void free_callchain_node(struct callchain_node *node) static void free_callchain_node(struct callchain_node *node)
{ {
struct callchain_list *list, *tmp; struct callchain_list *list, *tmp;
struct callchain_node *child; struct callchain_node *child;
struct rb_node *n; struct rb_node *n;
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
list_del(&list->list);
free(list);
}
list_for_each_entry_safe(list, tmp, &node->val, list) { list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del(&list->list); list_del(&list->list);
free(list); free(list);
...@@ -828,3 +923,41 @@ void free_callchain(struct callchain_root *root) ...@@ -828,3 +923,41 @@ void free_callchain(struct callchain_root *root)
free_callchain_node(&root->node); free_callchain_node(&root->node);
} }
int callchain_node__make_parent_list(struct callchain_node *node)
{
struct callchain_node *parent = node->parent;
struct callchain_list *chain, *new;
LIST_HEAD(head);
while (parent) {
list_for_each_entry_reverse(chain, &parent->val, list) {
new = malloc(sizeof(*new));
if (new == NULL)
goto out;
*new = *chain;
new->has_children = false;
list_add_tail(&new->list, &head);
}
parent = parent->parent;
}
list_for_each_entry_safe_reverse(chain, new, &head, list)
list_move_tail(&chain->list, &node->parent_val);
if (!list_empty(&node->parent_val)) {
chain = list_first_entry(&node->parent_val, struct callchain_list, list);
chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
chain = list_first_entry(&node->val, struct callchain_list, list);
chain->has_children = false;
}
return 0;
out:
list_for_each_entry_safe(chain, new, &head, list) {
list_del(&chain->list);
free(chain);
}
return -ENOMEM;
}
...@@ -24,12 +24,13 @@ ...@@ -24,12 +24,13 @@
#define CALLCHAIN_RECORD_HELP CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP #define CALLCHAIN_RECORD_HELP CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
#define CALLCHAIN_REPORT_HELP \ #define CALLCHAIN_REPORT_HELP \
HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|none)\n" \ HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|folded|none)\n" \
HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \ HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \ HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \
HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \ HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
HELP_PAD "sort_key:\tcall graph sort key (function|address)\n" \ HELP_PAD "sort_key:\tcall graph sort key (function|address)\n" \
HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n" HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n" \
HELP_PAD "value:\t\tcall graph value (percent|period|count)\n"
enum perf_call_graph_mode { enum perf_call_graph_mode {
CALLCHAIN_NONE, CALLCHAIN_NONE,
...@@ -43,7 +44,8 @@ enum chain_mode { ...@@ -43,7 +44,8 @@ enum chain_mode {
CHAIN_NONE, CHAIN_NONE,
CHAIN_FLAT, CHAIN_FLAT,
CHAIN_GRAPH_ABS, CHAIN_GRAPH_ABS,
CHAIN_GRAPH_REL CHAIN_GRAPH_REL,
CHAIN_FOLDED,
}; };
enum chain_order { enum chain_order {
...@@ -54,11 +56,14 @@ enum chain_order { ...@@ -54,11 +56,14 @@ enum chain_order {
struct callchain_node { struct callchain_node {
struct callchain_node *parent; struct callchain_node *parent;
struct list_head val; struct list_head val;
struct list_head parent_val;
struct rb_node rb_node_in; /* to insert nodes in an rbtree */ struct rb_node rb_node_in; /* to insert nodes in an rbtree */
struct rb_node rb_node; /* to sort nodes in an output tree */ struct rb_node rb_node; /* to sort nodes in an output tree */
struct rb_root rb_root_in; /* input tree of children */ struct rb_root rb_root_in; /* input tree of children */
struct rb_root rb_root; /* sorted output tree of children */ struct rb_root rb_root; /* sorted output tree of children */
unsigned int val_nr; unsigned int val_nr;
unsigned int count;
unsigned int children_count;
u64 hit; u64 hit;
u64 children_hit; u64 children_hit;
}; };
...@@ -78,6 +83,12 @@ enum chain_key { ...@@ -78,6 +83,12 @@ enum chain_key {
CCKEY_ADDRESS CCKEY_ADDRESS
}; };
enum chain_value {
CCVAL_PERCENT,
CCVAL_PERIOD,
CCVAL_COUNT,
};
struct callchain_param { struct callchain_param {
bool enabled; bool enabled;
enum perf_call_graph_mode record_mode; enum perf_call_graph_mode record_mode;
...@@ -90,6 +101,7 @@ struct callchain_param { ...@@ -90,6 +101,7 @@ struct callchain_param {
bool order_set; bool order_set;
enum chain_key key; enum chain_key key;
bool branch_callstack; bool branch_callstack;
enum chain_value value;
}; };
extern struct callchain_param callchain_param; extern struct callchain_param callchain_param;
...@@ -144,6 +156,11 @@ static inline u64 callchain_cumul_hits(struct callchain_node *node) ...@@ -144,6 +156,11 @@ static inline u64 callchain_cumul_hits(struct callchain_node *node)
return node->hit + node->children_hit; return node->hit + node->children_hit;
} }
static inline unsigned callchain_cumul_counts(struct callchain_node *node)
{
return node->count + node->children_count;
}
int callchain_register_param(struct callchain_param *param); int callchain_register_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root, int callchain_append(struct callchain_root *root,
struct callchain_cursor *cursor, struct callchain_cursor *cursor,
...@@ -229,7 +246,12 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused, ...@@ -229,7 +246,12 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
char *callchain_list__sym_name(struct callchain_list *cl, char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize, bool show_dso); char *bf, size_t bfsize, bool show_dso);
char *callchain_node__scnprintf_value(struct callchain_node *node,
char *bf, size_t bfsize, u64 total);
int callchain_node__fprintf_value(struct callchain_node *node,
FILE *fp, u64 total);
void free_callchain(struct callchain_root *root); void free_callchain(struct callchain_root *root);
int callchain_node__make_parent_list(struct callchain_node *node);
#endif /* __PERF_CALLCHAIN_H */ #endif /* __PERF_CALLCHAIN_H */
...@@ -1243,6 +1243,8 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name) ...@@ -1243,6 +1243,8 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
if (dso != NULL) { if (dso != NULL) {
__dsos__add(dsos, dso); __dsos__add(dsos, dso);
dso__set_basename(dso); dso__set_basename(dso);
/* Put dso here because __dsos__add already got it */
dso__put(dso);
} }
return dso; return dso;
} }
......
...@@ -9,17 +9,17 @@ ...@@ -9,17 +9,17 @@
static const char *argv_exec_path; static const char *argv_exec_path;
static const char *argv0_path; static const char *argv0_path;
const char *system_path(const char *path) char *system_path(const char *path)
{ {
static const char *prefix = PREFIX; static const char *prefix = PREFIX;
struct strbuf d = STRBUF_INIT; struct strbuf d = STRBUF_INIT;
if (is_absolute_path(path)) if (is_absolute_path(path))
return path; return strdup(path);
strbuf_addf(&d, "%s/%s", prefix, path); strbuf_addf(&d, "%s/%s", prefix, path);
path = strbuf_detach(&d, NULL); path = strbuf_detach(&d, NULL);
return path; return (char *)path;
} }
const char *perf_extract_argv0_path(const char *argv0) const char *perf_extract_argv0_path(const char *argv0)
...@@ -52,17 +52,16 @@ void perf_set_argv_exec_path(const char *exec_path) ...@@ -52,17 +52,16 @@ void perf_set_argv_exec_path(const char *exec_path)
/* Returns the highest-priority, location to look for perf programs. */ /* Returns the highest-priority, location to look for perf programs. */
const char *perf_exec_path(void) char *perf_exec_path(void)
{ {
const char *env; char *env;
if (argv_exec_path) if (argv_exec_path)
return argv_exec_path; return strdup(argv_exec_path);
env = getenv(EXEC_PATH_ENVIRONMENT); env = getenv(EXEC_PATH_ENVIRONMENT);
if (env && *env) { if (env && *env)
return env; return strdup(env);
}
return system_path(PERF_EXEC_PATH); return system_path(PERF_EXEC_PATH);
} }
...@@ -83,9 +82,11 @@ void setup_path(void) ...@@ -83,9 +82,11 @@ void setup_path(void)
{ {
const char *old_path = getenv("PATH"); const char *old_path = getenv("PATH");
struct strbuf new_path = STRBUF_INIT; struct strbuf new_path = STRBUF_INIT;
char *tmp = perf_exec_path();
add_path(&new_path, perf_exec_path()); add_path(&new_path, tmp);
add_path(&new_path, argv0_path); add_path(&new_path, argv0_path);
free(tmp);
if (old_path) if (old_path)
strbuf_addstr(&new_path, old_path); strbuf_addstr(&new_path, old_path);
......
...@@ -3,10 +3,11 @@ ...@@ -3,10 +3,11 @@
extern void perf_set_argv_exec_path(const char *exec_path); extern void perf_set_argv_exec_path(const char *exec_path);
extern const char *perf_extract_argv0_path(const char *path); extern const char *perf_extract_argv0_path(const char *path);
extern const char *perf_exec_path(void);
extern void setup_path(void); extern void setup_path(void);
extern int execv_perf_cmd(const char **argv); /* NULL terminated */ extern int execv_perf_cmd(const char **argv); /* NULL terminated */
extern int execl_perf_cmd(const char *cmd, ...); extern int execl_perf_cmd(const char *cmd, ...);
extern const char *system_path(const char *path); /* perf_exec_path and system_path return malloc'd string, caller must free it */
extern char *perf_exec_path(void);
extern char *system_path(const char *path);
#endif /* __PERF_EXEC_CMD_H */ #endif /* __PERF_EXEC_CMD_H */
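
  Since system_path() and perf_exec_path() now hand back malloc'd strings, every caller owns the result and must free() it, as the callers updated in this series do. A hedged sketch of the new calling convention, assuming the usual perf-internal header name:

  #include <stdio.h>
  #include <stdlib.h>
  #include "exec_cmd.h"

  static void show_exec_path(void)
  {
          char *path = perf_exec_path();  /* now a malloc'd string */

          if (path) {
                  printf("perf exec path: %s\n", path);
                  free(path);             /* caller releases it */
          }
  }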
...@@ -159,7 +159,7 @@ void load_command_list(const char *prefix, ...@@ -159,7 +159,7 @@ void load_command_list(const char *prefix,
struct cmdnames *other_cmds) struct cmdnames *other_cmds)
{ {
const char *env_path = getenv("PATH"); const char *env_path = getenv("PATH");
const char *exec_path = perf_exec_path(); char *exec_path = perf_exec_path();
if (exec_path) { if (exec_path) {
list_commands_in_dir(main_cmds, exec_path, prefix); list_commands_in_dir(main_cmds, exec_path, prefix);
...@@ -187,6 +187,7 @@ void load_command_list(const char *prefix, ...@@ -187,6 +187,7 @@ void load_command_list(const char *prefix,
sizeof(*other_cmds->names), cmdname_compare); sizeof(*other_cmds->names), cmdname_compare);
uniq(other_cmds); uniq(other_cmds);
} }
free(exec_path);
exclude_cmds(other_cmds, main_cmds); exclude_cmds(other_cmds, main_cmds);
} }
...@@ -203,13 +204,14 @@ void list_commands(const char *title, struct cmdnames *main_cmds, ...@@ -203,13 +204,14 @@ void list_commands(const char *title, struct cmdnames *main_cmds,
longest = other_cmds->names[i]->len; longest = other_cmds->names[i]->len;
if (main_cmds->cnt) { if (main_cmds->cnt) {
const char *exec_path = perf_exec_path(); char *exec_path = perf_exec_path();
printf("available %s in '%s'\n", title, exec_path); printf("available %s in '%s'\n", title, exec_path);
printf("----------------"); printf("----------------");
mput_char('-', strlen(title) + strlen(exec_path)); mput_char('-', strlen(title) + strlen(exec_path));
putchar('\n'); putchar('\n');
pretty_print_string_list(main_cmds, longest); pretty_print_string_list(main_cmds, longest);
putchar('\n'); putchar('\n');
free(exec_path);
} }
if (other_cmds->cnt) { if (other_cmds->cnt) {
......
#include <string.h>
void *memdup(const void *src, size_t len);
...@@ -122,6 +122,7 @@ void machine__delete_threads(struct machine *machine) ...@@ -122,6 +122,7 @@ void machine__delete_threads(struct machine *machine)
void machine__exit(struct machine *machine) void machine__exit(struct machine *machine)
{ {
machine__destroy_kernel_maps(machine);
map_groups__exit(&machine->kmaps); map_groups__exit(&machine->kmaps);
dsos__exit(&machine->dsos); dsos__exit(&machine->dsos);
machine__exit_vdso(machine); machine__exit_vdso(machine);
...@@ -564,7 +565,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start, ...@@ -564,7 +565,7 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
const char *filename) const char *filename)
{ {
struct map *map = NULL; struct map *map = NULL;
struct dso *dso; struct dso *dso = NULL;
struct kmod_path m; struct kmod_path m;
if (kmod_path__parse_name(&m, filename)) if (kmod_path__parse_name(&m, filename))
...@@ -585,7 +586,11 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start, ...@@ -585,7 +586,11 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
map_groups__insert(&machine->kmaps, map); map_groups__insert(&machine->kmaps, map);
/* Put the map here because map_groups__insert already got it */
map__put(map);
out: out:
/* put the dso here, corresponding to machine__findnew_module_dso */
dso__put(dso);
free(m.name); free(m.name);
return map; return map;
} }
...@@ -788,6 +793,7 @@ void machine__destroy_kernel_maps(struct machine *machine) ...@@ -788,6 +793,7 @@ void machine__destroy_kernel_maps(struct machine *machine)
kmap->ref_reloc_sym = NULL; kmap->ref_reloc_sym = NULL;
} }
map__put(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL; machine->vmlinux_maps[type] = NULL;
} }
} }
...@@ -1084,11 +1090,14 @@ int machine__create_kernel_maps(struct machine *machine) ...@@ -1084,11 +1090,14 @@ int machine__create_kernel_maps(struct machine *machine)
struct dso *kernel = machine__get_kernel(machine); struct dso *kernel = machine__get_kernel(machine);
const char *name; const char *name;
u64 addr = machine__get_running_kernel_start(machine, &name); u64 addr = machine__get_running_kernel_start(machine, &name);
if (!addr) int ret;
if (!addr || kernel == NULL)
return -1; return -1;
if (kernel == NULL || ret = __machine__create_kernel_maps(machine, kernel);
__machine__create_kernel_maps(machine, kernel) < 0) dso__put(kernel);
if (ret < 0)
return -1; return -1;
if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
......
...@@ -2326,8 +2326,11 @@ static int get_new_event_name(char *buf, size_t len, const char *base, ...@@ -2326,8 +2326,11 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
goto out; goto out;
if (!allow_suffix) { if (!allow_suffix) {
pr_warning("Error: event \"%s\" already exists. " pr_warning("Error: event \"%s\" already exists.\n"
"(Use -f to force duplicates.)\n", buf); " Hint: Remove existing event by 'perf probe -d'\n"
" or force duplicates by 'perf probe -f'\n"
" or set 'force=yes' in BPF source.\n",
buf);
ret = -EEXIST; ret = -EEXIST;
goto out; goto out;
} }
......
...@@ -683,20 +683,23 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) ...@@ -683,20 +683,23 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
if (ret <= 0 || nops == 0) { if (ret <= 0 || nops == 0) {
pf->fb_ops = NULL; pf->fb_ops = NULL;
ret = 0;
#if _ELFUTILS_PREREQ(0, 142) #if _ELFUTILS_PREREQ(0, 142)
} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
pf->cfi != NULL) { pf->cfi != NULL) {
Dwarf_Frame *frame; Dwarf_Frame *frame = NULL;
if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
pr_warning("Failed to get call frame on 0x%jx\n", pr_warning("Failed to get call frame on 0x%jx\n",
(uintmax_t)pf->addr); (uintmax_t)pf->addr);
return -ENOENT; ret = -ENOENT;
} }
free(frame);
#endif #endif
} }
/* Call finder's callback handler */ /* Call finder's callback handler */
if (ret >= 0)
ret = pf->callback(sc_die, pf); ret = pf->callback(sc_die, pf);
/* *pf->fb_ops will be cached in libdw. Don't free it. */ /* *pf->fb_ops will be cached in libdw. Don't free it. */
......
...@@ -342,22 +342,6 @@ char *rtrim(char *s) ...@@ -342,22 +342,6 @@ char *rtrim(char *s)
return s; return s;
} }
/**
* memdup - duplicate region of memory
* @src: memory region to duplicate
* @len: memory region length
*/
void *memdup(const void *src, size_t len)
{
void *p;
p = malloc(len);
if (p)
memcpy(p, src, len);
return p;
}
char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints) char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints)
{ {
/* /*
......
...@@ -1042,6 +1042,8 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -1042,6 +1042,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
} }
curr_dso->symtab_type = dso->symtab_type; curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmaps, curr_map); map_groups__insert(kmaps, curr_map);
/* kmaps already got it */
map__put(curr_map);
dsos__add(&map->groups->machine->dsos, curr_dso); dsos__add(&map->groups->machine->dsos, curr_dso);
dso__set_loaded(curr_dso, map->type); dso__set_loaded(curr_dso, map->type);
} else } else
......
...@@ -21,7 +21,8 @@ struct callchain_param callchain_param = { ...@@ -21,7 +21,8 @@ struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_ABS, .mode = CHAIN_GRAPH_ABS,
.min_percent = 0.5, .min_percent = 0.5,
.order = ORDER_CALLEE, .order = ORDER_CALLEE,
.key = CCKEY_FUNCTION .key = CCKEY_FUNCTION,
.value = CCVAL_PERCENT,
}; };
/* /*
......