Commit d785452c authored by Ingo Molnar

Merge tag 'perf-urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo Carvalho de Melo:

 - Fix report -F (abort, in_tx, mispredict, etc.) segfaults for perf.data files
   without branch info (Jiri Olsa)

 - Add a patch that should have gone in a previous patchkit to use the global
   cache provided by libunwind (Namhyung Kim)

 - Make CPUINFO_PROC an array to support different kernels; the problem was
   detected when the information reported via /proc/cpuinfo changed on ARM (Wang Nan)

 - 'perf probe' --demangle typo fix and a new --quiet option (Masami Hiramatsu)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 1776b106 4cdcc33d
......@@ -375,7 +375,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('x', "exec", NULL, "executable|path",
"target executable name or path", opt_set_target),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Disable symbol demangling"),
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_END()
......
......@@ -13,7 +13,7 @@
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
......@@ -30,7 +30,7 @@
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
......@@ -47,14 +47,14 @@
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC "cpu"
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define CPUINFO_PROC "vendor_id"
#define CPUINFO_PROC {"vendor_id"}
#endif
#ifdef __sh__
......@@ -67,14 +67,14 @@
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC "cpu type"
#define CPUINFO_PROC {"cpu type"}
#endif
#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __sparc__
......@@ -87,14 +87,14 @@
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC "cpu model"
#define CPUINFO_PROC {"cpu model"}
#endif
#ifdef __ia64__
......@@ -102,7 +102,7 @@
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#define CPUINFO_PROC {"model name"}
#endif
#ifdef __arm__
......@@ -113,7 +113,7 @@
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC "Processor"
#define CPUINFO_PROC {"model name", "Processor"}
#endif
#ifdef __aarch64__
......@@ -133,28 +133,28 @@
: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC "cpu model"
#define CPUINFO_PROC {"cpu model"}
#endif
#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#define CPUINFO_PROC {"Processor"}
#endif
#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#define CPUINFO_PROC {"CPU"}
#endif
#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "core ID"
#define CPUINFO_PROC {"core ID"}
#endif
#ifdef __tile__
......@@ -162,7 +162,7 @@
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC "model name"
#define CPUINFO_PROC {"model name"}
#endif
#define barrier() asm volatile ("" ::: "memory")
......
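The brace form above matters because header.c (next hunk) pastes the macro straight into an array initializer, so each architecture can now list several /proc/cpuinfo keys to be tried in order. A short illustration using the two-entry ARM definition from this diff; the expansion comment is explanatory, not part of the patch:

/* Illustration only, not part of the patch: the new CPUINFO_PROC form
 * expands into a string-array initializer. */
#define CPUINFO_PROC {"model name", "Processor"}

static const char *cpuinfo_procs[] = CPUINFO_PROC;
/* equivalent to {"model name", "Processor"}: two keys, tried in order
 * by write_cpudesc() until one is found in /proc/cpuinfo */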
......@@ -579,16 +579,12 @@ static int write_version(int fd, struct perf_header *h __maybe_unused,
return do_write_string(fd, perf_version_string);
}
static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC NULL
#endif
FILE *file;
char *buf = NULL;
char *s, *p;
const char *search = CPUINFO_PROC;
const char *search = cpuinfo_proc;
size_t len = 0;
int ret = -1;
......@@ -638,6 +634,25 @@ static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
return ret;
}
static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC {"model name", }
#endif
const char *cpuinfo_procs[] = CPUINFO_PROC;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
int ret;
ret = __write_cpudesc(fd, cpuinfo_procs[i]);
if (ret >= 0)
return ret;
}
return -1;
}
static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
{
......
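The body of __write_cpudesc() is elided above; roughly, it scans /proc/cpuinfo for a line that starts with the requested key and writes out the text after the colon. A self-contained sketch of that kind of lookup; cpuinfo_lookup() and its exact parsing are illustrative assumptions, not the in-tree code:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch, not the in-tree __write_cpudesc(): find the first
 * /proc/cpuinfo line starting with `key` and copy the text after ':'. */
static int cpuinfo_lookup(const char *key, char *out, size_t outsz)
{
        FILE *file = fopen("/proc/cpuinfo", "r");
        char *line = NULL;
        size_t len = 0;
        int ret = -1;

        if (!file)
                return -1;

        while (getline(&line, &len, file) > 0) {
                char *p;

                if (strncmp(line, key, strlen(key)))
                        continue;
                p = strchr(line, ':');
                if (!p)
                        continue;
                p++;
                while (*p == ' ' || *p == '\t')
                        p++;
                p[strcspn(p, "\n")] = '\0';     /* trim trailing newline */
                snprintf(out, outsz, "%s", p);
                ret = 0;
                break;
        }
        free(line);
        fclose(file);
        return ret;
}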
......@@ -373,6 +373,9 @@ struct sort_entry sort_cpu = {
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return _sort__dso_cmp(left->branch_info->from.map,
right->branch_info->from.map);
}
......@@ -380,13 +383,19 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info)
return _hist_entry__dso_snprintf(he->branch_info->from.map,
bf, size, width);
else
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return _sort__dso_cmp(left->branch_info->to.map,
right->branch_info->to.map);
}
......@@ -394,8 +403,11 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info)
return _hist_entry__dso_snprintf(he->branch_info->to.map,
bf, size, width);
else
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int64_t
......@@ -404,6 +416,12 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
struct addr_map_symbol *from_l = &left->branch_info->from;
struct addr_map_symbol *from_r = &right->branch_info->from;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
from_l = &left->branch_info->from;
from_r = &right->branch_info->from;
if (!from_l->sym && !from_r->sym)
return _sort__addr_cmp(from_l->addr, from_r->addr);
......@@ -413,8 +431,13 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *to_l = &left->branch_info->to;
struct addr_map_symbol *to_r = &right->branch_info->to;
struct addr_map_symbol *to_l, *to_r;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
to_l = &left->branch_info->to;
to_r = &right->branch_info->to;
if (!to_l->sym && !to_r->sym)
return _sort__addr_cmp(to_l->addr, to_r->addr);
......@@ -425,19 +448,27 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info) {
struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
if (he->branch_info) {
struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
struct sort_entry sort_dso_from = {
......@@ -471,11 +502,13 @@ struct sort_entry sort_sym_to = {
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
const unsigned char mp = left->branch_info->flags.mispred !=
right->branch_info->flags.mispred;
const unsigned char p = left->branch_info->flags.predicted !=
right->branch_info->flags.predicted;
unsigned char mp, p;
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
return mp || p;
}
......@@ -483,10 +516,12 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width){
static const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.predicted)
out = "N";
else if (he->branch_info->flags.mispred)
out = "Y";
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}
......@@ -989,6 +1024,9 @@ struct sort_entry sort_mem_dcacheline = {
static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.abort !=
right->branch_info->flags.abort;
}
......@@ -996,10 +1034,15 @@ sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
static const char *out = ".";
static const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.abort)
out = "A";
else
out = ".";
}
return repsep_snprintf(bf, size, "%-*s", width, out);
}
......@@ -1013,6 +1056,9 @@ struct sort_entry sort_abort = {
static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
return cmp_null(left->branch_info, right->branch_info);
return left->branch_info->flags.in_tx !=
right->branch_info->flags.in_tx;
}
......@@ -1020,10 +1066,14 @@ sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
static const char *out = ".";
static const char *out = "N/A";
if (he->branch_info) {
if (he->branch_info->flags.in_tx)
out = "T";
else
out = ".";
}
return repsep_snprintf(bf, size, "%-*s", width, out);
}
......
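All of the new guards above fall back to cmp_null(), perf's existing NULL-aware comparator, so hist entries without branch info get a stable sort order instead of a NULL dereference. For readers of just this hunk, a minimal sketch of what such a comparator looks like; the in-tree helper may differ in detail:

#include <stdint.h>

/* Sketch of a NULL-aware comparator in the spirit of cmp_null():
 * two NULLs compare equal, otherwise the NULL side sorts to one end. */
static int64_t cmp_null(const void *l, const void *r)
{
        if (!l && !r)
                return 0;
        else if (!l)
                return -1;
        else
                return 1;
}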
......@@ -117,6 +117,9 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
if (!new)
return -ENOMEM;
list_add(&new->list, &thread->comm_list);
if (exec)
unwind__flush_access(thread);
}
thread->comm_set = true;
......
......@@ -539,11 +539,23 @@ int unwind__prepare_access(struct thread *thread)
return -ENOMEM;
}
unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
thread__set_priv(thread, addr_space);
return 0;
}
void unwind__flush_access(struct thread *thread)
{
unw_addr_space_t addr_space;
if (callchain_param.record_mode != CALLCHAIN_DWARF)
return;
addr_space = thread__priv(thread);
unw_flush_cache(addr_space, 0, 0);
}
void unwind__finish_access(struct thread *thread)
{
unw_addr_space_t addr_space;
......
......@@ -23,6 +23,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
#ifdef HAVE_LIBUNWIND_SUPPORT
int libunwind__arch_reg_id(int regnum);
int unwind__prepare_access(struct thread *thread);
void unwind__flush_access(struct thread *thread);
void unwind__finish_access(struct thread *thread);
#else
static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
......@@ -30,6 +31,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
return 0;
}
static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
#endif
#else
......@@ -49,6 +51,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
return 0;
}
static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */
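Taken together, the unwind changes keep one libunwind address space per thread with global caching enabled, and flush that cache when the thread exec()s a new image (see the thread.c hunk above) so unwind info cached for the old binary is not reused. A condensed sketch of that lifecycle against the public libunwind API; the wrapper names and the external accessors declaration are illustrative, not perf's actual symbols:

#include <libunwind.h>

/* Accessors that let libunwind read the target's memory, registers and
 * unwind tables; perf supplies its own set, declared externally here
 * for illustration only. */
extern unw_accessors_t accessors;

/* prepare: one cached address space per profiled thread */
static unw_addr_space_t sketch_prepare(void)
{
        unw_addr_space_t as = unw_create_addr_space(&accessors, 0);

        if (as)
                unw_set_caching_policy(as, UNW_CACHE_GLOBAL);
        return as;
}

/* flush: cached entries are stale once the thread exec()s */
static void sketch_flush(unw_addr_space_t as)
{
        unw_flush_cache(as, 0, 0);
}

/* finish: tear the address space down with the thread */
static void sketch_finish(unw_addr_space_t as)
{
        unw_destroy_addr_space(as);
}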