Commit 402e8db5 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

 - Rename the "colors.code" ~/.perfconfig variable to "colors.jump_arrows",
   as it controls just that UI element in the annotate browser (Taeung Song)

 - Avoid trying to read ELF symtabs from device files, noticed while doing
   memory profiling work (Jiri Olsa)

 - Improve context detection when offering options in the hists browser,
   i.e. some options, such as scripting, only make sense in 'perf report'
   mode, when the browser is working with a perf.data file, not in
   'perf top' mode (Namhyung Kim)

Infrastructure changes:

 - Eliminate duplication in the hists browser filter functions, getting the
   common part into a function that receives callbacks for filtering by
   DSO, thread, etc. (Namhyung Kim)

 - Fix misleadingly indented assignment, found using
   gcc6 -Wmisleading-indentation (Markus Trippelsdorf)

 - Handle LLVM relocation oddities in libbpf, introducing a 'perf test' that
   detects such problems and then fixing the problem, so that the test now
   passes (Wang Nan)

 - More improvements to the build infrastructure to allow reusing the
   feature detection facilities (Wang Nan)

 - Auto initialize the globals needed by cpu__max_{cpu,node}() routines
   (Arnaldo Carvalho de Melo)

Documentation changes:

 - Document the perf sysctls in Documentation/sysctl/kernel.txt (Ben Hutchings)

 - Document a bunch more ~/.perfconfig knobs (Taeung Song)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 34229b27 5ac76283
......@@ -58,6 +58,8 @@ show up in /proc/sys/kernel:
- panic_on_stackoverflow
- panic_on_unrecovered_nmi
- panic_on_warn
- perf_cpu_time_max_percent
- perf_event_paranoid
- pid_max
- powersave-nap [ PPC only ]
- printk
......@@ -639,6 +641,17 @@ allowed to execute.
==============================================================
perf_event_paranoid:
Controls use of the performance events system by unprivileged
users (without CAP_SYS_ADMIN). The default value is 1.
-1: Allow use of (almost) all events by all users
>=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
>=1: Disallow CPU event access by users without CAP_SYS_ADMIN
>=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
==============================================================
pid_max:
......
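
For illustration, a minimal standalone sketch (not part of this patch set) that
reads the sysctl and reports, per the table above, what an unprivileged user may do:

/* Sketch: read and interpret /proc/sys/kernel/perf_event_paranoid. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f) {
		perror("perf_event_paranoid");
		return 1;
	}
	if (fscanf(f, "%d", &level) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("perf_event_paranoid = %d\n", level);
	if (level <= -1)
		puts("unprivileged: (almost) all events allowed");
	if (level >= 0)
		puts("unprivileged: no raw tracepoint access");
	if (level >= 1)
		puts("unprivileged: no CPU event access");
	if (level >= 2)
		puts("unprivileged: no kernel profiling");
	return 0;
}
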
......@@ -27,7 +27,7 @@ endef
# the rule that uses them - an example for that is the 'bionic'
# feature check. ]
#
FEATURE_TESTS ?= \
FEATURE_TESTS_BASIC := \
backtrace \
dwarf \
fortify-source \
......@@ -56,6 +56,25 @@ FEATURE_TESTS ?= \
get_cpuid \
bpf
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
FEATURE_TESTS_EXTRA := \
bionic \
compile-32 \
compile-x32 \
cplus-demangle \
hello \
libbabeltrace \
liberty \
liberty-z \
libunwind-debug-frame
FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
ifeq ($(FEATURE_TESTS),all)
FEATURE_TESTS := $(FEATURE_TESTS_BASIC) $(FEATURE_TESTS_EXTRA)
endif
FEATURE_DISPLAY ?= \
dwarf \
glibc \
......
......@@ -201,6 +201,7 @@ struct bpf_object {
Elf_Data *data;
} *reloc;
int nr_reloc;
int maps_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
......@@ -350,6 +351,7 @@ static struct bpf_object *bpf_object__new(const char *path,
*/
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.maps_shndx = -1;
obj->loaded = false;
......@@ -529,12 +531,12 @@ bpf_object__init_maps(struct bpf_object *obj, void *data,
}
static int
bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
bpf_object__init_maps_name(struct bpf_object *obj)
{
int i;
Elf_Data *symbols = obj->efile.symbols;
if (!symbols || maps_shndx < 0)
if (!symbols || obj->efile.maps_shndx < 0)
return -EINVAL;
for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
......@@ -544,7 +546,7 @@ bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
if (!gelf_getsym(symbols, i, &sym))
continue;
if (sym.st_shndx != maps_shndx)
if (sym.st_shndx != obj->efile.maps_shndx)
continue;
map_name = elf_strptr(obj->efile.elf,
......@@ -572,7 +574,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Scn *scn = NULL;
int idx = 0, err = 0, maps_shndx = -1;
int idx = 0, err = 0;
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
......@@ -625,7 +627,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
else if (strcmp(name, "maps") == 0) {
err = bpf_object__init_maps(obj, data->d_buf,
data->d_size);
maps_shndx = idx;
obj->efile.maps_shndx = idx;
} else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
......@@ -674,8 +676,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_warning("Corrupted ELF file: index of strtab invalid\n");
return LIBBPF_ERRNO__FORMAT;
}
if (maps_shndx >= 0)
err = bpf_object__init_maps_name(obj, maps_shndx);
if (obj->efile.maps_shndx >= 0)
err = bpf_object__init_maps_name(obj);
out:
return err;
}
......@@ -697,7 +699,8 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
static int
bpf_program__collect_reloc(struct bpf_program *prog,
size_t nr_maps, GElf_Shdr *shdr,
Elf_Data *data, Elf_Data *symbols)
Elf_Data *data, Elf_Data *symbols,
int maps_shndx)
{
int i, nrels;
......@@ -724,9 +727,6 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__FORMAT;
}
insn_idx = rel.r_offset / sizeof(struct bpf_insn);
pr_debug("relocation: insn_idx=%u\n", insn_idx);
if (!gelf_getsym(symbols,
GELF_R_SYM(rel.r_info),
&sym)) {
......@@ -735,6 +735,15 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__FORMAT;
}
if (sym.st_shndx != maps_shndx) {
pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
prog->section_name, sym.st_shndx);
return -LIBBPF_ERRNO__RELOC;
}
insn_idx = rel.r_offset / sizeof(struct bpf_insn);
pr_debug("relocation: insn_idx=%u\n", insn_idx);
if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
insn_idx, insns[insn_idx].code);
......@@ -863,7 +872,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
err = bpf_program__collect_reloc(prog, nr_maps,
shdr, data,
obj->efile.symbols);
obj->efile.symbols,
obj->efile.maps_shndx);
if (err)
return err;
}
......
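
The gist of the libbpf change: a relocation in a BPF program section must resolve
to a symbol in the "maps" section; anything else (e.g. a global variable, as in the
new test program further below) is rejected with a relocation error instead of
silently producing a broken program. A simplified, standalone sketch of that check
using libelf (illustrative only, not the libbpf code itself):

/* Assumes libelf (link with -lelf): check that every relocation in a
 * .rel section points at a symbol defined in the maps section. */
#include <gelf.h>
#include <stdbool.h>

static bool relocs_point_at_maps(Elf_Scn *rel_scn, Elf_Data *symbols,
				 int maps_shndx)
{
	Elf_Data *data = elf_getdata(rel_scn, NULL);
	GElf_Shdr shdr;
	int i, nrels;

	if (!data || !gelf_getshdr(rel_scn, &shdr))
		return false;

	nrels = shdr.sh_size / shdr.sh_entsize;
	for (i = 0; i < nrels; i++) {
		GElf_Rel rel;
		GElf_Sym sym;

		if (!gelf_getrel(data, i, &rel) ||
		    !gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym))
			return false;
		/* A global variable lives outside "maps": reject it. */
		if (sym.st_shndx != maps_shndx)
			return false;
	}
	return true;
}
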
......@@ -62,7 +62,7 @@ Given a $HOME/.perfconfig like this:
medium = green, default
normal = lightgray, default
selected = white, lightgray
code = blue, default
jump_arrows = blue, default
addr = magenta, default
root = white, blue
......@@ -98,6 +98,204 @@ Given a $HOME/.perfconfig like this:
order = caller
sort-key = function
Variables
~~~~~~~~~
colors.*::
The variables for customizing the colors used in the output of
'report', 'top' and 'annotate' in the TUI. They should specify the
foreground and background colors, separated by a comma, for example:
medium = green, lightgray
If you want to use the color configured for your terminal, just leave it
as 'default', for example:
medium = default, lightgray
Available colors:
red, yellow, green, cyan, gray, black, blue,
white, default, magenta, lightgray
colors.top::
'top' means an overhead percentage of more than 5%,
and the values of this variable specify the colors used for it.
Default values are foreground-color 'red' and
background-color 'default'.
colors.medium::
'medium' means an overhead percentage of more than 0.5%.
Default values are 'green' and 'default'.
colors.normal::
'normal' means the rest of the overhead percentages,
i.e. those not covered by 'top', 'medium' or 'selected'.
Default values are 'lightgray' and 'default'.
colors.selected::
This selects the colors for the current entry in a list of entries
from sub-commands (top, report, annotate).
Default values are 'black' and 'lightgray'.
colors.jump_arrows::
Colors for the jump arrows drawn on assembly code listings for jump
instructions such as 'jns', 'jmp', 'jne', etc.
Default values are 'blue', 'default'.
colors.addr::
This selects colors for addresses from 'annotate'.
Default values are 'magenta', 'default'.
colors.root::
Colors for headers in the output of sub-commands (top, report).
Default values are 'white', 'blue'.
tui.*, gtk.*::
Subcommands that can be configured here are 'top', 'report' and 'annotate'.
These values are booleans, for example:
[tui]
top = true
will make the TUI the default for the 'top' subcommand. These will be
available if the required libs were detected at tool build time.
buildid.*::
buildid.dir::
Each executable and shared library in modern distributions comes with a
content-based identifier that, if available, will be inserted in a
'perf.data' file header to, at analysis time, find what is needed to do
symbol resolution, code annotation, etc.
The recording tools also store a hard link or copy in a per-user
directory, $HOME/.debug/, of binaries, shared libraries, /proc/kallsyms
and /proc/kcore files to be used at analysis time.
The buildid.dir variable can be used to either change this directory
cache location, or to disable it altogether. If you want to disable it,
set buildid.dir to /dev/null. The default is $HOME/.debug
annotate.*::
These options only work in the TUI.
They control how addresses, jump arrows and source code lines
are displayed in the assembly listing of a program.
annotate.hide_src_code::
If the program being analyzed has source code available,
this option controls whether 'annotate' interleaves it with the assembly listing.
For example, consider the following four lines of a program.
If this option is 'true', they are printed
without the source code, as below.
│ push %rbp
│ mov %rsp,%rbp
│ sub $0x10,%rsp
│ mov (%rdi),%rdx
But if this option is 'false', the source code for that part
is also printed, as below. Default is 'false'.
│ struct rb_node *rb_next(const struct rb_node *node)
│ {
│ push %rbp
│ mov %rsp,%rbp
│ sub $0x10,%rsp
│ struct rb_node *parent;
│ if (RB_EMPTY_NODE(node))
│ mov (%rdi),%rdx
│ return n;
annotate.use_offset::
Addresses can be shown as offsets from the first address of the
loaded function: instead of the original absolute addresses of the
assembly code, the addresses minus that base address are printed.
For example,
if the base address is 0xffffffff81624d50 as below,
ffffffff81624d50 <load0>
an instruction in the assembly listing normally shows its absolute address, as below,
ffffffff816250b8:│ mov 0x8(%r14),%rdi
but if use_offset is 'true', the offset from the base address is printed instead.
Default is 'true'. This option only applies to the TUI.
368:│ mov 0x8(%r14),%rdi
annotate.jump_arrows::
Assembly code may contain jump instructions.
Depending on the boolean value of jump_arrows,
arrows showing where each jump instruction goes
are printed or not, as below.
│ ┌──jmp 1333
│ │ xchg %ax,%ax
│1330:│ mov %r15,%r10
│1333:└─→cmp %r15,%r14
If jump_arrows is 'false', the arrows aren't printed, as below.
Default is 'false'.
│ ↓ jmp 1333
│ xchg %ax,%ax
│1330: mov %r15,%r10
│1333: cmp %r15,%r14
annotate.show_linenr::
When showing source code, if this option is 'true',
line numbers are printed as below.
│1628 if (type & PERF_SAMPLE_IDENTIFIER) {
│ ↓ jne 508
│1628 data->id = *array;
│1629 array++;
│1630 }
However, if this option is 'false', they aren't printed, as below.
Default is 'false'.
│ if (type & PERF_SAMPLE_IDENTIFIER) {
│ ↓ jne 508
│ data->id = *array;
│ array++;
│ }
annotate.show_nr_jumps::
Let's see a part of assembly code.
│1382: movb $0x1,-0x270(%rbp)
If this option is 'true', the number of branches jumping to that address is printed before it, as below.
Default is 'false'.
│1 1382: movb $0x1,-0x270(%rbp)
annotate.show_total_period::
To compare two records on an instruction-by-instruction basis, this
option displays the total number of samples that belong to a line of
assembly code. If this option is 'true', total periods are printed
instead of percent values, as below.
302 │ mov %eax,%eax
But if this option is 'false', overhead percent values are printed instead, as below.
Default is 'false'.
99.93 │ mov %eax,%eax
hist.*::
hist.percentage::
This option controls how the overhead of filtered entries is calculated -
that is, the value of this option only takes effect when there's a
filter (by comm, dso or symbol name). Suppose the following example:
Overhead Symbols
........ .......
33.33% foo
33.33% bar
33.33% baz
This is the original overhead, and we'll filter out the first 'foo'
entry. The value 'relative' would increase the overhead of 'bar'
and 'baz' to 50.00% each, while 'absolute' would keep showing their
current overhead (33.33%).
SEE ALSO
--------
linkperf:perf[1]
......@@ -5,7 +5,7 @@
medium = green, lightgray
normal = black, lightgray
selected = lightgray, magenta
code = blue, lightgray
jump_arrows = blue, lightgray
addr = magenta, lightgray
[tui]
......
......@@ -165,7 +165,16 @@ ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
endif
endif
# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
# Without this setting the output feature dump file misses some features, for
# example, liberty. Select all checkers so we won't get an incomplete feature
# dump file.
ifeq ($(config),1)
ifdef MAKECMDGOALS
ifeq ($(filter feature-dump,$(MAKECMDGOALS)),feature-dump)
FEATURE_TESTS := all
endif
endif
include config/Makefile
endif
......@@ -618,7 +627,7 @@ clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue}.c
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
......
llvm-src-base.c
llvm-src-kbuild.c
llvm-src-prologue.c
llvm-src-relocation.c
......@@ -31,7 +31,7 @@ perf-y += sample-parsing.o
perf-y += parse-no-sample-id-all.o
perf-y += kmod-path.o
perf-y += thread-map.o
perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o llvm-src-relocation.o
perf-y += bpf.o
perf-y += topology.o
perf-y += cpumap.o
......@@ -59,6 +59,13 @@ $(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c tests/Build
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
$(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/Build
$(call rule_mkdir)
$(Q)echo '#include <tests/llvm.h>' > $@
$(Q)echo 'const char test_llvm__bpf_test_relocation[] =' >> $@
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
......
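
The sed rule above wraps each line of the test program in a quoted string, so the
generated $(OUTPUT)tests/llvm-src-relocation.c embeds the BPF source as one C string
constant that the LLVM test case feeds to clang at run time; an abridged sketch of
the result (not the literal generated file):

#include <tests/llvm.h>
const char test_llvm__bpf_test_relocation[] =
"/*\n"
" * bpf-script-test-relocation.c\n"
" */\n"
"SEC(\"func=sys_write\")\n"
"int bpf_func__sys_write(void *ctx)\n"
"{\n"
"	/* ... */\n"
"}\n"
;
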
/*
* bpf-script-test-relocation.c
* Test BPF loader checking relocation
*/
#ifndef LINUX_VERSION_CODE
# error Need LINUX_VERSION_CODE
# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
#endif
#define BPF_ANY 0
#define BPF_MAP_TYPE_ARRAY 2
#define BPF_FUNC_map_lookup_elem 1
#define BPF_FUNC_map_update_elem 2
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
(void *) BPF_FUNC_map_lookup_elem;
static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
(void *) BPF_FUNC_map_update_elem;
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def SEC("maps") my_table = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 1,
};
int this_is_a_global_val;
SEC("func=sys_write")
int bpf_func__sys_write(void *ctx)
{
int key = 0;
int value = 0;
/*
* Incorrect relocation. Should not allow this program to be
* loaded into the kernel.
*/
bpf_map_update_elem(&this_is_a_global_val, &key, &value, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
......@@ -71,6 +71,15 @@ static struct {
(NR_ITERS + 1) / 4,
},
#endif
{
LLVM_TESTCASE_BPF_RELOCATION,
"Test BPF relocation checker",
"[bpf_relocation_test]",
"fix 'perf test LLVM' first",
"libbpf error when dealing with relocation",
NULL,
0,
},
};
static int do_test(struct bpf_object *obj, int (*func)(void),
......@@ -190,7 +199,7 @@ static int __test__bpf(int idx)
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
bpf_testcase_table[idx].prog_id,
true);
true, NULL);
if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
pr_debug("Unable to get BPF object, %s\n",
bpf_testcase_table[idx].msg_compile_fail);
......@@ -202,14 +211,21 @@ static int __test__bpf(int idx)
obj = prepare_bpf(obj_buf, obj_buf_sz,
bpf_testcase_table[idx].name);
if (!obj) {
if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
if (!obj)
pr_debug("Fail to load BPF object: %s\n",
bpf_testcase_table[idx].msg_load_fail);
else
pr_debug("Success unexpectedly: %s\n",
bpf_testcase_table[idx].msg_load_fail);
ret = TEST_FAIL;
goto out;
}
ret = do_test(obj,
bpf_testcase_table[idx].target_func,
bpf_testcase_table[idx].expect_result);
if (obj)
ret = do_test(obj,
bpf_testcase_table[idx].target_func,
bpf_testcase_table[idx].expect_result);
out:
bpf__clear();
return ret;
......
......@@ -35,6 +35,7 @@ static int test__bpf_parsing(void *obj_buf __maybe_unused,
static struct {
const char *source;
const char *desc;
bool should_load_fail;
} bpf_source_table[__LLVM_TESTCASE_MAX] = {
[LLVM_TESTCASE_BASE] = {
.source = test_llvm__bpf_base_prog,
......@@ -48,14 +49,19 @@ static struct {
.source = test_llvm__bpf_test_prologue_prog,
.desc = "Compile source for BPF prologue generation test",
},
[LLVM_TESTCASE_BPF_RELOCATION] = {
.source = test_llvm__bpf_test_relocation,
.desc = "Compile source for BPF relocation test",
.should_load_fail = true,
},
};
int
test_llvm__fetch_bpf_obj(void **p_obj_buf,
size_t *p_obj_buf_sz,
enum test_llvm__testcase idx,
bool force)
bool force,
bool *should_load_fail)
{
const char *source;
const char *desc;
......@@ -68,6 +74,8 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
source = bpf_source_table[idx].source;
desc = bpf_source_table[idx].desc;
if (should_load_fail)
*should_load_fail = bpf_source_table[idx].should_load_fail;
perf_config(perf_config_cb, NULL);
......@@ -136,14 +144,15 @@ int test__llvm(int subtest)
int ret;
void *obj_buf = NULL;
size_t obj_buf_sz = 0;
bool should_load_fail = false;
if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
return TEST_FAIL;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
subtest, false);
subtest, false, &should_load_fail);
if (ret == TEST_OK) {
if (ret == TEST_OK && !should_load_fail) {
ret = test__bpf_parsing(obj_buf, obj_buf_sz);
if (ret != TEST_OK) {
pr_debug("Failed to parse test case '%s'\n",
......
......@@ -7,14 +7,17 @@
extern const char test_llvm__bpf_base_prog[];
extern const char test_llvm__bpf_test_kbuild_prog[];
extern const char test_llvm__bpf_test_prologue_prog[];
extern const char test_llvm__bpf_test_relocation[];
enum test_llvm__testcase {
LLVM_TESTCASE_BASE,
LLVM_TESTCASE_KBUILD,
LLVM_TESTCASE_BPF_PROLOGUE,
LLVM_TESTCASE_BPF_RELOCATION,
__LLVM_TESTCASE_MAX,
};
int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
enum test_llvm__testcase index, bool force);
enum test_llvm__testcase index, bool force,
bool *should_load_fail);
#endif
......@@ -110,7 +110,6 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
*/
for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
struct symbol *pair, *first_pair;
bool backwards = true;
sym = rb_entry(nd, struct symbol, rb_node);
......@@ -151,27 +150,14 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
continue;
} else {
struct rb_node *nnd;
detour:
nnd = backwards ? rb_prev(&pair->rb_node) :
rb_next(&pair->rb_node);
if (nnd) {
struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
if (UM(next->start) == mem_start) {
pair = next;
pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL, NULL);
if (pair) {
if (UM(pair->start) == mem_start)
goto next_pair;
}
}
if (backwards) {
backwards = false;
pair = first_pair;
goto detour;
pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
mem_start, sym->name, pair->name);
}
pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
mem_start, sym->name, pair->name);
}
} else
pr_debug("%#" PRIx64 ": %s not on kallsyms\n",
......
......@@ -531,8 +531,8 @@ static struct ui_browser_colorset {
.bg = "yellow",
},
{
.colorset = HE_COLORSET_CODE,
.name = "code",
.colorset = HE_COLORSET_JUMP_ARROWS,
.name = "jump_arrows",
.fg = "blue",
.bg = "default",
},
......
......@@ -7,7 +7,7 @@
#define HE_COLORSET_MEDIUM 51
#define HE_COLORSET_NORMAL 52
#define HE_COLORSET_SELECTED 53
#define HE_COLORSET_CODE 54
#define HE_COLORSET_JUMP_ARROWS 54
#define HE_COLORSET_ADDR 55
#define HE_COLORSET_ROOT 56
......
......@@ -284,7 +284,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
to = (u64)btarget->idx;
}
ui_browser__set_color(browser, HE_COLORSET_CODE);
ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
__ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
from, to);
}
......
......@@ -1782,7 +1782,7 @@ static int
add_thread_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct thread *thread)
{
if (thread == NULL)
if (!sort__has_thread || thread == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s(%d) thread",
......@@ -1825,7 +1825,7 @@ static int
add_dso_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct map *map)
{
if (map == NULL)
if (!sort__has_dso || map == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s DSO",
......@@ -1850,7 +1850,7 @@ static int
add_map_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr, struct map *map)
{
if (map == NULL)
if (!sort__has_dso || map == NULL)
return 0;
if (asprintf(optstr, "Browse map details") < 0)
......@@ -1971,7 +1971,7 @@ static int
add_socket_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, int socket_id)
{
if (socket_id < 0)
if (!sort__has_socket || socket_id < 0)
return 0;
if (asprintf(optstr, "Zoom %s Processor Socket %d",
......@@ -2263,10 +2263,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
continue;
}
if (!sort__has_sym)
goto add_exit_option;
if (browser->selection == NULL)
if (!sort__has_sym || browser->selection == NULL)
goto skip_annotation;
if (sort__mode == SORT_MODE__BRANCH) {
......@@ -2306,11 +2303,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
&options[nr_options],
socked_id);
/* perf script support */
if (!is_report_browser(hbt))
goto skip_scripting;
if (browser->he_selection) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
thread, NULL);
if (sort__has_thread && thread) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
thread, NULL);
}
/*
* Note that browser->selection != NULL
* when browser->he_selection is not NULL,
......@@ -2320,16 +2322,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
*
* See hist_browser__show_entry.
*/
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
NULL, browser->selection->sym);
if (sort__has_sym && browser->selection->sym) {
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
NULL, browser->selection->sym);
}
}
nr_options += add_script_opt(browser, &actions[nr_options],
&options[nr_options], NULL, NULL);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
add_exit_option:
skip_scripting:
nr_options += add_exit_opt(browser, &actions[nr_options],
&options[nr_options]);
......
......@@ -8,6 +8,10 @@
#include <linux/bitmap.h>
#include "asm/bug.h"
static int max_cpu_num;
static int max_node_num;
static int *cpunode_map;
static struct cpu_map *cpu_map__default_new(void)
{
struct cpu_map *cpus;
......@@ -486,6 +490,32 @@ static void set_max_node_num(void)
pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}
int cpu__max_node(void)
{
if (unlikely(!max_node_num))
set_max_node_num();
return max_node_num;
}
int cpu__max_cpu(void)
{
if (unlikely(!max_cpu_num))
set_max_cpu_num();
return max_cpu_num;
}
int cpu__get_node(int cpu)
{
if (unlikely(cpunode_map == NULL)) {
pr_debug("cpu_map not initialized\n");
return -1;
}
return cpunode_map[cpu];
}
static int init_cpunode_map(void)
{
int i;
......
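
With cpu__max_cpu() and cpu__max_node() now initializing their globals on first use,
callers no longer have to arrange for set_max_cpu_num()/set_max_node_num() to run
first; a minimal usage sketch (hypothetical caller, include path illustrative):

/* cpu__get_node() still requires cpu__setup_cpunode_map() to have run. */
#include <stdio.h>
#include "cpumap.h"

static void print_topology_limits(void)
{
	printf("max cpus:  %d\n", cpu__max_cpu());
	printf("max nodes: %d\n", cpu__max_node());
}
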
......@@ -57,37 +57,11 @@ static inline bool cpu_map__empty(const struct cpu_map *map)
return map ? map->map[0] == -1 : true;
}
int max_cpu_num;
int max_node_num;
int *cpunode_map;
int cpu__setup_cpunode_map(void);
static inline int cpu__max_node(void)
{
if (unlikely(!max_node_num))
pr_debug("cpu_map not initialized\n");
return max_node_num;
}
static inline int cpu__max_cpu(void)
{
if (unlikely(!max_cpu_num))
pr_debug("cpu_map not initialized\n");
return max_cpu_num;
}
static inline int cpu__get_node(int cpu)
{
if (unlikely(cpunode_map == NULL)) {
pr_debug("cpu_map not initialized\n");
return -1;
}
return cpunode_map[cpu];
}
int cpu__max_node(void);
int cpu__max_cpu(void);
int cpu__get_node(int cpu);
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
int (*f)(struct cpu_map *map, int cpu, void *data),
......
......@@ -52,6 +52,11 @@ int dso__read_binary_type_filename(const struct dso *dso,
debuglink--;
if (*debuglink == '/')
debuglink++;
ret = -1;
if (!is_regular_file(filename))
break;
ret = filename__read_debuglink(filename, debuglink,
size - (debuglink - filename));
}
......
......@@ -2362,12 +2362,15 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
case EPERM:
case EACCES:
return scnprintf(msg, size,
"You may not have permission to collect %sstats.\n"
"Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
" -1 - Not paranoid at all\n"
" 0 - Disallow raw tracepoint access for unpriv\n"
" 1 - Disallow cpu events for unpriv\n"
" 2 - Disallow kernel profiling for unpriv",
"You may not have permission to collect %sstats.\n\n"
"Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
"which controls use of the performance events system by\n"
"unprivileged users (without CAP_SYS_ADMIN).\n\n"
"The default value is 1:\n\n"
" -1: Allow use of (almost) all events by all users\n"
">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
target->system_wide ? "system-wide " : "");
case ENOENT:
return scnprintf(msg, size, "The %s event is not supported.",
......
......@@ -1254,28 +1254,6 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
return false;
}
void hists__filter_by_dso(struct hists *hists)
{
struct rb_node *nd;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (symbol_conf.exclude_other && !h->parent)
continue;
if (hists__filter_entry_by_dso(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
......@@ -1288,25 +1266,6 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
return false;
}
void hists__filter_by_thread(struct hists *hists)
{
struct rb_node *nd;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (hists__filter_entry_by_thread(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he)
{
......@@ -1320,25 +1279,6 @@ static bool hists__filter_entry_by_symbol(struct hists *hists,
return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
struct rb_node *nd;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (hists__filter_entry_by_symbol(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
}
}
static bool hists__filter_entry_by_socket(struct hists *hists,
struct hist_entry *he)
{
......@@ -1351,7 +1291,9 @@ static bool hists__filter_entry_by_socket(struct hists *hists,
return false;
}
void hists__filter_by_socket(struct hists *hists)
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
struct rb_node *nd;
......@@ -1363,13 +1305,37 @@ void hists__filter_by_socket(struct hists *hists)
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (hists__filter_entry_by_socket(hists, h))
if (filter(hists, h))
continue;
hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
hists__remove_entry_filter(hists, h, type);
}
}
void hists__filter_by_thread(struct hists *hists)
{
hists__filter_by_type(hists, HIST_FILTER__THREAD,
hists__filter_entry_by_thread);
}
void hists__filter_by_dso(struct hists *hists)
{
hists__filter_by_type(hists, HIST_FILTER__DSO,
hists__filter_entry_by_dso);
}
void hists__filter_by_symbol(struct hists *hists)
{
hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
hists__filter_entry_by_symbol);
}
void hists__filter_by_socket(struct hists *hists)
{
hists__filter_by_type(hists, HIST_FILTER__SOCKET,
hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
++stats->nr_events[0];
......
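
With the common iteration factored into hists__filter_by_type(), adding another
filter only needs a per-entry predicate plus a two-line wrapper; a hypothetical
sketch (HIST_FILTER__COMM and the predicate below are made up for illustration):

static bool hists__filter_entry_by_comm(struct hists *hists __maybe_unused,
					struct hist_entry *he __maybe_unused)
{
	/* return true if 'he' should be filtered out */
	return false;
}

void hists__filter_by_comm(struct hists *hists)
{
	hists__filter_by_type(hists, HIST_FILTER__COMM,
			      hists__filter_entry_by_comm);
}
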
......@@ -179,6 +179,16 @@ struct symbol *machine__find_kernel_symbol(struct machine *machine,
mapp, filter);
}
static inline
struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
enum map_type type, const char *name,
struct map **mapp,
symbol_filter_t filter)
{
return map_groups__find_symbol_by_name(&machine->kmaps, type, name,
mapp, filter);
}
static inline
struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
struct map **mapp,
......
......@@ -153,7 +153,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
if (fd == -1)
return -1;
sret = read(fd, alias->unit, UNIT_MAX_LEN);
sret = read(fd, alias->unit, UNIT_MAX_LEN);
if (sret < 0)
goto error;
......
......@@ -25,6 +25,7 @@ int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
int sort__has_socket = 0;
int sort__has_thread = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
......@@ -2136,6 +2137,8 @@ static int sort_dimension__add(const char *tok,
sort__has_dso = 1;
} else if (sd->entry == &sort_socket) {
sort__has_socket = 1;
} else if (sd->entry == &sort_thread) {
sort__has_thread = 1;
}
return __sort_dimension__add(sd);
......
......@@ -32,9 +32,11 @@ extern const char default_sort_order[];
extern regex_t ignore_callees_regex;
extern int have_ignore_callees;
extern int sort__need_collapse;
extern int sort__has_dso;
extern int sort__has_parent;
extern int sort__has_sym;
extern int sort__has_socket;
extern int sort__has_thread;
extern enum sort_mode sort__mode;
extern struct sort_entry sort_comm;
extern struct sort_entry sort_dso;
......
......@@ -97,7 +97,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
}
}
void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
int i;
struct perf_stat_evsel *ps = evsel->priv;
......@@ -108,7 +108,7 @@ void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
perf_stat_evsel_id_init(evsel);
}
int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
if (evsel->priv == NULL)
......@@ -117,13 +117,13 @@ int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
return 0;
}
void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
zfree(&evsel->priv);
}
int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
int ncpus, int nthreads)
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
int ncpus, int nthreads)
{
struct perf_counts *counts;
......@@ -134,13 +134,13 @@ int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
return counts ? 0 : -ENOMEM;
}
void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
perf_counts__delete(evsel->prev_raw_counts);
evsel->prev_raw_counts = NULL;
}
int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
int ncpus = perf_evsel__nr_cpus(evsel);
int nthreads = thread_map__nr(evsel->threads);
......
......@@ -74,16 +74,6 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
double avg, int cpu, enum aggr_mode aggr);
void perf_evsel__reset_stat_priv(struct perf_evsel *evsel);
int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel);
void perf_evsel__free_stat_priv(struct perf_evsel *evsel);
int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
int ncpus, int nthreads);
void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel);
int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw);
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct perf_evlist *evlist);
void perf_evlist__reset_stats(struct perf_evlist *evlist);
......
......@@ -1466,7 +1466,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
if (filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
if (is_regular_file(name) &&
filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
dso__set_build_id(dso, build_id);
/*
......@@ -1487,6 +1488,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
root_dir, name, PATH_MAX))
continue;
if (!is_regular_file(name))
continue;
/* Name is now the name of the next image to try */
if (symsrc__init(ss, dso, name, symtab_type) < 0)
continue;
......
......@@ -691,3 +691,13 @@ const char *perf_tip(const char *dirpath)
return tip;
}
bool is_regular_file(const char *file)
{
struct stat st;
if (stat(file, &st))
return false;
return S_ISREG(st.st_mode);
}
......@@ -343,5 +343,6 @@ int fetch_kernel_version(unsigned int *puint,
#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
const char *perf_tip(const char *dirpath);
bool is_regular_file(const char *file);
#endif /* GIT_COMPAT_UTIL_H */