Commit 51ab7155 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Teach 'perf record' about perf_event_attr.clockid (Peter Zijlstra)

  - Improve 'perf sched replay' on high CPU core count machines (Yunlong Song)

  - Consider PERF_RECORD_ events with cpumode == 0 in 'perf top', removing one
    cause of long term memory usage buildup, i.e. not processing PERF_RECORD_EXIT
    events (Arnaldo Carvalho de Melo)

  - Add 'I' event modifier for perf_event_attr.exclude_idle bit (Jiri Olsa)

  - Respect -i option in 'perf kmem' (Jiri Olsa)

Infrastructure changes:

  - Honor operator priority in libtraceevent (Namhyung Kim)

  - Merge all perf_event_attr print functions (Peter Zijlstra)

  - Check kmaps access to make code more robust (Wang Nan)

  - Fix inverted logic in perf_mmap__empty() (He Kuang)

  - Fix ARM 32 'perf probe' building error (Wang Nan)

  - Fix perf_event_attr tests (Jiri Olsa)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 6645f318 a1e12da4
...@@ -1939,7 +1939,22 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) ...@@ -1939,7 +1939,22 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
goto out_warn_free; goto out_warn_free;
type = process_arg_token(event, right, tok, type); type = process_arg_token(event, right, tok, type);
arg->op.right = right;
if (right->type == PRINT_OP &&
get_op_prio(arg->op.op) < get_op_prio(right->op.op)) {
struct print_arg tmp;
/* rotate ops according to the priority */
arg->op.right = right->op.left;
tmp = *arg;
*arg = *right;
*right = tmp;
arg->op.left = right;
} else {
arg->op.right = right;
}
} else if (strcmp(token, "[") == 0) { } else if (strcmp(token, "[") == 0) {
......
...@@ -26,6 +26,7 @@ counted. The following modifiers exist: ...@@ -26,6 +26,7 @@ counted. The following modifiers exist:
u - user-space counting u - user-space counting
k - kernel counting k - kernel counting
h - hypervisor counting h - hypervisor counting
I - non idle counting
G - guest counting (in KVM guests) G - guest counting (in KVM guests)
H - host counting (not in KVM guests) H - host counting (not in KVM guests)
p - precise level p - precise level
......
...@@ -250,6 +250,13 @@ is off by default. ...@@ -250,6 +250,13 @@ is off by default.
--running-time:: --running-time::
Record running and enabled time for read events (:S) Record running and enabled time for read events (:S)
-k::
--clockid::
Sets the clock id to use for the various time fields in the perf_event_type
records. See clock_gettime(). In particular CLOCK_MONOTONIC and
CLOCK_MONOTONIC_RAW are supported, some events might also allow
CLOCK_BOOTTIME, CLOCK_REALTIME and CLOCK_TAI.
SEE ALSO SEE ALSO
-------- --------
linkperf:perf-stat[1], linkperf:perf-list[1] linkperf:perf-stat[1], linkperf:perf-list[1]
...@@ -663,7 +663,6 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -663,7 +663,6 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{ {
const char * const default_sort_order = "frag,hit,bytes"; const char * const default_sort_order = "frag,hit,bytes";
struct perf_data_file file = { struct perf_data_file file = {
.path = input_name,
.mode = PERF_DATA_MODE_READ, .mode = PERF_DATA_MODE_READ,
}; };
const struct option kmem_options[] = { const struct option kmem_options[] = {
...@@ -701,6 +700,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -701,6 +700,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
return __cmd_record(argc, argv); return __cmd_record(argc, argv);
} }
file.path = input_name;
session = perf_session__new(&file, false, &perf_kmem); session = perf_session__new(&file, false, &perf_kmem);
if (session == NULL) if (session == NULL)
return -1; return -1;
......
...@@ -711,6 +711,90 @@ static int perf_record_config(const char *var, const char *value, void *cb) ...@@ -711,6 +711,90 @@ static int perf_record_config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb); return perf_default_config(var, value, cb);
} }
/*
 * Maps a user-visible clock name (the -k/--clockid argument) to the
 * kernel clockid value; the table below is terminated by a NULL name.
 */
struct clockid_map {
const char *name;
int clockid;
};
/* build one clockids[] entry / the NULL-name table terminator */
#define CLOCKID_MAP(n, c)	\
{ .name = n, .clockid = (c), }
#define CLOCKID_END	{ .name = NULL, }
/*
 * Add the missing ones, we need to build on many distros...
 * (fallback numeric values for libcs whose headers predate these clocks)
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif
/* name -> clockid lookup table consumed by parse_clockid() */
static const struct clockid_map clockids[] = {
/* available for all events, NMI safe */
CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
/* available for some events */
CLOCKID_MAP("realtime", CLOCK_REALTIME),
CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
CLOCKID_MAP("tai", CLOCK_TAI),
/* available for the lazy */
CLOCKID_MAP("mono", CLOCK_MONOTONIC),
CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
CLOCKID_MAP("real", CLOCK_REALTIME),
CLOCKID_MAP("boot", CLOCK_BOOTTIME),
CLOCKID_END,
};
/*
 * Option callback for -k/--clockid.
 *
 * Accepts either a raw numeric clockid or a case-insensitive clock name
 * from the clockids[] table, with an optional "CLOCK_" prefix on the name.
 * On success sets opts->clockid and opts->use_clockid and returns 0;
 * returns -1 on a repeated option or an unknown clock name.
 */
static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;
	const char *ostr = str;
	const struct clockid_map *entry;

	/* --no-clockid: drop any previously requested clock */
	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* option given without an argument: nothing to record */
	if (str == NULL)
		return 0;

	/* reject a second -k on the command line */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* a raw numeric clockid is accepted as-is */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* names may optionally carry a "CLOCK_" prefix */
	if (strncasecmp(str, "CLOCK_", 6) == 0)
		str += 6;

	for (entry = clockids; entry->name != NULL; entry++) {
		if (strcasecmp(str, entry->name) == 0) {
			opts->clockid = entry->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}
static const char * const __record_usage[] = { static const char * const __record_usage[] = {
"perf record [<options>] [<command>]", "perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]", "perf record [<options>] -- <command> [<options>]",
...@@ -842,6 +926,9 @@ struct option __record_options[] = { ...@@ -842,6 +926,9 @@ struct option __record_options[] = {
"Sample machine registers on interrupt"), "Sample machine registers on interrupt"),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time, OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"), "Record running/enabled time of read (:S) events"),
OPT_CALLBACK('k', "clockid", &record.opts,
"clockid", "clockid to use for events, see clock_gettime()",
parse_clockid),
OPT_END() OPT_END()
}; };
......
...@@ -347,7 +347,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, ...@@ -347,7 +347,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
static void report__warn_kptr_restrict(const struct report *rep) static void report__warn_kptr_restrict(const struct report *rep)
{ {
struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION]; struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION];
struct kmap *kernel_kmap = map__kmap(kernel_map); struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
if (kernel_map == NULL || if (kernel_map == NULL ||
(kernel_map->dso->hit && (kernel_map->dso->hit &&
......
...@@ -23,12 +23,13 @@ ...@@ -23,12 +23,13 @@
#include <semaphore.h> #include <semaphore.h>
#include <pthread.h> #include <pthread.h>
#include <math.h> #include <math.h>
#include <api/fs/fs.h>
#define PR_SET_NAME 15 /* Set process name */ #define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096 #define MAX_CPUS 4096
#define COMM_LEN 20 #define COMM_LEN 20
#define SYM_LEN 129 #define SYM_LEN 129
#define MAX_PID 65536 #define MAX_PID 1024000
struct sched_atom; struct sched_atom;
...@@ -124,7 +125,7 @@ struct perf_sched { ...@@ -124,7 +125,7 @@ struct perf_sched {
struct perf_tool tool; struct perf_tool tool;
const char *sort_order; const char *sort_order;
unsigned long nr_tasks; unsigned long nr_tasks;
struct task_desc *pid_to_task[MAX_PID]; struct task_desc **pid_to_task;
struct task_desc **tasks; struct task_desc **tasks;
const struct trace_sched_handler *tp_handler; const struct trace_sched_handler *tp_handler;
pthread_mutex_t start_work_mutex; pthread_mutex_t start_work_mutex;
...@@ -169,6 +170,7 @@ struct perf_sched { ...@@ -169,6 +170,7 @@ struct perf_sched {
u64 cpu_last_switched[MAX_CPUS]; u64 cpu_last_switched[MAX_CPUS];
struct rb_root atom_root, sorted_atom_root; struct rb_root atom_root, sorted_atom_root;
struct list_head sort_list, cmp_pid; struct list_head sort_list, cmp_pid;
bool force;
}; };
static u64 get_nsecs(void) static u64 get_nsecs(void)
...@@ -326,8 +328,19 @@ static struct task_desc *register_pid(struct perf_sched *sched, ...@@ -326,8 +328,19 @@ static struct task_desc *register_pid(struct perf_sched *sched,
unsigned long pid, const char *comm) unsigned long pid, const char *comm)
{ {
struct task_desc *task; struct task_desc *task;
static int pid_max;
BUG_ON(pid >= MAX_PID); if (sched->pid_to_task == NULL) {
if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
pid_max = MAX_PID;
BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
}
if (pid >= (unsigned long)pid_max) {
BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
sizeof(struct task_desc *))) == NULL);
while (pid >= (unsigned long)pid_max)
sched->pid_to_task[pid_max++] = NULL;
}
task = sched->pid_to_task[pid]; task = sched->pid_to_task[pid];
...@@ -346,7 +359,7 @@ static struct task_desc *register_pid(struct perf_sched *sched, ...@@ -346,7 +359,7 @@ static struct task_desc *register_pid(struct perf_sched *sched,
sched->pid_to_task[pid] = task; sched->pid_to_task[pid] = task;
sched->nr_tasks++; sched->nr_tasks++;
sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *)); sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
BUG_ON(!sched->tasks); BUG_ON(!sched->tasks);
sched->tasks[task->nr] = task; sched->tasks[task->nr] = task;
...@@ -425,24 +438,45 @@ static u64 get_cpu_usage_nsec_parent(void) ...@@ -425,24 +438,45 @@ static u64 get_cpu_usage_nsec_parent(void)
return sum; return sum;
} }
static int self_open_counters(void) static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{ {
struct perf_event_attr attr; struct perf_event_attr attr;
char sbuf[STRERR_BUFSIZE]; char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
int fd; int fd;
struct rlimit limit;
bool need_privilege = false;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_SOFTWARE; attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_TASK_CLOCK; attr.config = PERF_COUNT_SW_TASK_CLOCK;
force_again:
fd = sys_perf_event_open(&attr, 0, -1, -1, fd = sys_perf_event_open(&attr, 0, -1, -1,
perf_event_open_cloexec_flag()); perf_event_open_cloexec_flag());
if (fd < 0) if (fd < 0) {
if (errno == EMFILE) {
if (sched->force) {
BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
limit.rlim_cur += sched->nr_tasks - cur_task;
if (limit.rlim_cur > limit.rlim_max) {
limit.rlim_max = limit.rlim_cur;
need_privilege = true;
}
if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
if (need_privilege && errno == EPERM)
strcpy(info, "Need privilege\n");
} else
goto force_again;
} else
strcpy(info, "Have a try with -f option\n");
}
pr_err("Error: sys_perf_event_open() syscall returned " pr_err("Error: sys_perf_event_open() syscall returned "
"with %d (%s)\n", fd, "with %d (%s)\n%s", fd,
strerror_r(errno, sbuf, sizeof(sbuf))); strerror_r(errno, sbuf, sizeof(sbuf)), info);
exit(EXIT_FAILURE);
}
return fd; return fd;
} }
...@@ -460,6 +494,7 @@ static u64 get_cpu_usage_nsec_self(int fd) ...@@ -460,6 +494,7 @@ static u64 get_cpu_usage_nsec_self(int fd)
struct sched_thread_parms { struct sched_thread_parms {
struct task_desc *task; struct task_desc *task;
struct perf_sched *sched; struct perf_sched *sched;
int fd;
}; };
static void *thread_func(void *ctx) static void *thread_func(void *ctx)
...@@ -470,13 +505,12 @@ static void *thread_func(void *ctx) ...@@ -470,13 +505,12 @@ static void *thread_func(void *ctx)
u64 cpu_usage_0, cpu_usage_1; u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret; unsigned long i, ret;
char comm2[22]; char comm2[22];
int fd; int fd = parms->fd;
zfree(&parms); zfree(&parms);
sprintf(comm2, ":%s", this_task->comm); sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2); prctl(PR_SET_NAME, comm2);
fd = self_open_counters();
if (fd < 0) if (fd < 0)
return NULL; return NULL;
again: again:
...@@ -528,6 +562,7 @@ static void create_tasks(struct perf_sched *sched) ...@@ -528,6 +562,7 @@ static void create_tasks(struct perf_sched *sched)
BUG_ON(parms == NULL); BUG_ON(parms == NULL);
parms->task = task = sched->tasks[i]; parms->task = task = sched->tasks[i];
parms->sched = sched; parms->sched = sched;
parms->fd = self_open_counters(sched, i);
sem_init(&task->sleep_sem, 0, 0); sem_init(&task->sleep_sem, 0, 0);
sem_init(&task->ready_for_work, 0, 0); sem_init(&task->ready_for_work, 0, 0);
sem_init(&task->work_done_sem, 0, 0); sem_init(&task->work_done_sem, 0, 0);
...@@ -572,13 +607,13 @@ static void wait_for_tasks(struct perf_sched *sched) ...@@ -572,13 +607,13 @@ static void wait_for_tasks(struct perf_sched *sched)
cpu_usage_1 = get_cpu_usage_nsec_parent(); cpu_usage_1 = get_cpu_usage_nsec_parent();
if (!sched->runavg_cpu_usage) if (!sched->runavg_cpu_usage)
sched->runavg_cpu_usage = sched->cpu_usage; sched->runavg_cpu_usage = sched->cpu_usage;
sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10; sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
if (!sched->runavg_parent_cpu_usage) if (!sched->runavg_parent_cpu_usage)
sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 + sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
sched->parent_cpu_usage)/10; sched->parent_cpu_usage)/sched->replay_repeat;
ret = pthread_mutex_lock(&sched->start_work_mutex); ret = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(ret); BUG_ON(ret);
...@@ -610,7 +645,7 @@ static void run_one_test(struct perf_sched *sched) ...@@ -610,7 +645,7 @@ static void run_one_test(struct perf_sched *sched)
sched->sum_fluct += fluct; sched->sum_fluct += fluct;
if (!sched->run_avg) if (!sched->run_avg)
sched->run_avg = delta; sched->run_avg = delta;
sched->run_avg = (sched->run_avg * 9 + delta) / 10; sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0); printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);
...@@ -1452,6 +1487,7 @@ static int perf_sched__read_events(struct perf_sched *sched) ...@@ -1452,6 +1487,7 @@ static int perf_sched__read_events(struct perf_sched *sched)
struct perf_data_file file = { struct perf_data_file file = {
.path = input_name, .path = input_name,
.mode = PERF_DATA_MODE_READ, .mode = PERF_DATA_MODE_READ,
.force = sched->force,
}; };
int rc = -1; int rc = -1;
...@@ -1685,6 +1721,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -1685,6 +1721,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"be more verbose (show symbol address, etc)"), "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"), "dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
OPT_END() OPT_END()
}; };
const struct option sched_options[] = { const struct option sched_options[] = {
......
...@@ -62,6 +62,8 @@ struct record_opts { ...@@ -62,6 +62,8 @@ struct record_opts {
u64 user_interval; u64 user_interval;
bool sample_transaction; bool sample_transaction;
unsigned initial_delay; unsigned initial_delay;
bool use_clockid;
clockid_t clockid;
}; };
struct option; struct option;
......
...@@ -5,7 +5,7 @@ group_fd=-1 ...@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8 flags=0|8
cpu=* cpu=*
type=0|1 type=0|1
size=104 size=112
config=0 config=0
sample_period=4000 sample_period=4000
sample_type=263 sample_type=263
......
...@@ -5,7 +5,7 @@ group_fd=-1 ...@@ -5,7 +5,7 @@ group_fd=-1
flags=0|8 flags=0|8
cpu=* cpu=*
type=0 type=0
size=104 size=112
config=0 config=0
sample_period=0 sample_period=0
sample_type=0 sample_type=0
......
...@@ -295,6 +295,36 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) ...@@ -295,6 +295,36 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
return test__checkevent_genhw(evlist); return test__checkevent_genhw(evlist);
} }
/* The bare :I modifier must set exclude_idle and leave every other
 * exclusion bit and the precision level untouched. */
static int test__checkevent_exclude_idle_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	TEST_ASSERT_VAL("wrong exclude idle", first->attr.exclude_idle);
	TEST_ASSERT_VAL("wrong exclude guest", !first->attr.exclude_guest);
	TEST_ASSERT_VAL("wrong exclude host", !first->attr.exclude_host);
	TEST_ASSERT_VAL("wrong exclude_user", !first->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", !first->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", !first->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", !first->attr.precise_ip);

	/* remaining attr fields are validated by the base symbolic-name check */
	return test__checkevent_symbolic_name(evlist);
}
/* :I combined with other modifiers (the "instructions:kIG" case): exclude_idle
 * must be set alongside the exclusion bits implied by the k and G modifiers. */
static int test__checkevent_exclude_idle_modifier_1(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	TEST_ASSERT_VAL("wrong exclude idle", first->attr.exclude_idle);
	TEST_ASSERT_VAL("wrong exclude guest", !first->attr.exclude_guest);
	TEST_ASSERT_VAL("wrong exclude host", first->attr.exclude_host);
	TEST_ASSERT_VAL("wrong exclude_user", first->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", !first->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", first->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", !first->attr.precise_ip);

	/* remaining attr fields are validated by the base symbolic-name check */
	return test__checkevent_symbolic_name(evlist);
}
static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist) static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
{ {
struct perf_evsel *evsel = perf_evlist__first(evlist); struct perf_evsel *evsel = perf_evlist__first(evlist);
...@@ -1494,6 +1524,16 @@ static struct evlist_test test__events[] = { ...@@ -1494,6 +1524,16 @@ static struct evlist_test test__events[] = {
.id = 100, .id = 100,
}, },
#endif #endif
{
.name = "instructions:I",
.check = test__checkevent_exclude_idle_modifier,
.id = 45,
},
{
.name = "instructions:kIG",
.check = test__checkevent_exclude_idle_modifier_1,
.id = 46,
},
}; };
static struct evlist_test test__events_pmu[] = { static struct evlist_test test__events_pmu[] = {
......
...@@ -695,7 +695,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) ...@@ -695,7 +695,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
static bool perf_mmap__empty(struct perf_mmap *md) static bool perf_mmap__empty(struct perf_mmap *md)
{ {
return perf_mmap__read_head(md) != md->prev; return perf_mmap__read_head(md) == md->prev;
} }
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
......
...@@ -32,8 +32,12 @@ static struct { ...@@ -32,8 +32,12 @@ static struct {
bool exclude_guest; bool exclude_guest;
bool mmap2; bool mmap2;
bool cloexec; bool cloexec;
bool clockid;
bool clockid_wrong;
} perf_missing_features; } perf_missing_features;
static clockid_t clockid;
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused) static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{ {
return 0; return 0;
...@@ -761,6 +765,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) ...@@ -761,6 +765,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
attr->disabled = 0; attr->disabled = 0;
attr->enable_on_exec = 0; attr->enable_on_exec = 0;
} }
clockid = opts->clockid;
if (opts->use_clockid) {
attr->use_clockid = 1;
attr->clockid = opts->clockid;
}
} }
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
...@@ -1001,67 +1011,126 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) ...@@ -1001,67 +1011,126 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
return fd; return fd;
} }
#define __PRINT_ATTR(fmt, cast, field) \ struct bit_names {
fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field) int bit;
const char *name;
#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field) };
#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field) static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field) {
bool first_bit = true;
#define PRINT_ATTR2N(name1, field1, name2, field2) \ int i = 0;
fprintf(fp, " %-19s %u %-19s %u\n", \
name1, attr->field1, name2, attr->field2) do {
if (value & bits[i].bit) {
#define PRINT_ATTR2(field1, field2) \ buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
PRINT_ATTR2N(#field1, field1, #field2, field2) first_bit = false;
}
static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp) } while (bits[++i].name != NULL);
{ }
size_t ret = 0;
static void __p_sample_type(char *buf, size_t size, u64 value)
ret += fprintf(fp, "%.60s\n", graph_dotted_line); {
ret += fprintf(fp, "perf_event_attr:\n"); #define bit_name(n) { PERF_SAMPLE_##n, #n }
struct bit_names bits[] = {
ret += PRINT_ATTR_U32(type); bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
ret += PRINT_ATTR_U32(size); bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
ret += PRINT_ATTR_X64(config); bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
ret += PRINT_ATTR_U64(sample_period); bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
ret += PRINT_ATTR_U64(sample_freq); bit_name(IDENTIFIER), bit_name(REGS_INTR),
ret += PRINT_ATTR_X64(sample_type); { .name = NULL, }
ret += PRINT_ATTR_X64(read_format); };
#undef bit_name
ret += PRINT_ATTR2(disabled, inherit); __p_bits(buf, size, value, bits);
ret += PRINT_ATTR2(pinned, exclusive); }
ret += PRINT_ATTR2(exclude_user, exclude_kernel);
ret += PRINT_ATTR2(exclude_hv, exclude_idle); static void __p_read_format(char *buf, size_t size, u64 value)
ret += PRINT_ATTR2(mmap, comm); {
ret += PRINT_ATTR2(mmap2, comm_exec); #define bit_name(n) { PERF_FORMAT_##n, #n }
ret += PRINT_ATTR2(freq, inherit_stat); struct bit_names bits[] = {
ret += PRINT_ATTR2(enable_on_exec, task); bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
ret += PRINT_ATTR2(watermark, precise_ip); bit_name(ID), bit_name(GROUP),
ret += PRINT_ATTR2(mmap_data, sample_id_all); { .name = NULL, }
ret += PRINT_ATTR2(exclude_host, exclude_guest); };
ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel, #undef bit_name
"excl.callchain_user", exclude_callchain_user); __p_bits(buf, size, value, bits);
}
ret += PRINT_ATTR_U32(wakeup_events);
ret += PRINT_ATTR_U32(wakeup_watermark); #define BUF_SIZE 1024
ret += PRINT_ATTR_X32(bp_type);
ret += PRINT_ATTR_X64(bp_addr); #define p_hex(val) snprintf(buf, BUF_SIZE, "%"PRIx64, (uint64_t)(val))
ret += PRINT_ATTR_X64(config1); #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
ret += PRINT_ATTR_U64(bp_len); #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
ret += PRINT_ATTR_X64(config2); #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
ret += PRINT_ATTR_X64(branch_sample_type); #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
ret += PRINT_ATTR_X64(sample_regs_user);
ret += PRINT_ATTR_U32(sample_stack_user); #define PRINT_ATTRn(_n, _f, _p) \
ret += PRINT_ATTR_X64(sample_regs_intr); do { \
if (attr->_f) { \
ret += fprintf(fp, "%.60s\n", graph_dotted_line); _p(attr->_f); \
ret += attr__fprintf(fp, _n, buf, priv);\
} \
} while (0)
#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv)
{
char buf[BUF_SIZE];
int ret = 0;
PRINT_ATTRf(type, p_unsigned);
PRINT_ATTRf(size, p_unsigned);
PRINT_ATTRf(config, p_hex);
PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
PRINT_ATTRf(sample_type, p_sample_type);
PRINT_ATTRf(read_format, p_read_format);
PRINT_ATTRf(disabled, p_unsigned);
PRINT_ATTRf(inherit, p_unsigned);
PRINT_ATTRf(pinned, p_unsigned);
PRINT_ATTRf(exclusive, p_unsigned);
PRINT_ATTRf(exclude_user, p_unsigned);
PRINT_ATTRf(exclude_kernel, p_unsigned);
PRINT_ATTRf(exclude_hv, p_unsigned);
PRINT_ATTRf(exclude_idle, p_unsigned);
PRINT_ATTRf(mmap, p_unsigned);
PRINT_ATTRf(comm, p_unsigned);
PRINT_ATTRf(freq, p_unsigned);
PRINT_ATTRf(inherit_stat, p_unsigned);
PRINT_ATTRf(enable_on_exec, p_unsigned);
PRINT_ATTRf(task, p_unsigned);
PRINT_ATTRf(watermark, p_unsigned);
PRINT_ATTRf(precise_ip, p_unsigned);
PRINT_ATTRf(mmap_data, p_unsigned);
PRINT_ATTRf(sample_id_all, p_unsigned);
PRINT_ATTRf(exclude_host, p_unsigned);
PRINT_ATTRf(exclude_guest, p_unsigned);
PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
PRINT_ATTRf(exclude_callchain_user, p_unsigned);
PRINT_ATTRf(mmap2, p_unsigned);
PRINT_ATTRf(comm_exec, p_unsigned);
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
PRINT_ATTRf(sample_regs_user, p_hex);
PRINT_ATTRf(sample_stack_user, p_unsigned);
PRINT_ATTRf(clockid, p_signed);
PRINT_ATTRf(sample_regs_intr, p_hex);
return ret; return ret;
} }
/*
 * attr__fprintf_f callback used when dumping a perf_event_attr at open
 * time: prints one "name value" line, left-padding the name to 32 columns.
 * Returns the number of characters written (fprintf's return value).
 */
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
			void *priv __attribute__((unused)))
{
	int printed;

	printed = fprintf(fp, " %-32s %s\n", name, val);
	return printed;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads) struct thread_map *threads)
{ {
...@@ -1085,6 +1154,12 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, ...@@ -1085,6 +1154,12 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
} }
fallback_missing_features: fallback_missing_features:
if (perf_missing_features.clockid_wrong)
evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
if (perf_missing_features.clockid) {
evsel->attr.use_clockid = 0;
evsel->attr.clockid = 0;
}
if (perf_missing_features.cloexec) if (perf_missing_features.cloexec)
flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
if (perf_missing_features.mmap2) if (perf_missing_features.mmap2)
...@@ -1095,8 +1170,12 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, ...@@ -1095,8 +1170,12 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
if (perf_missing_features.sample_id_all) if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0; evsel->attr.sample_id_all = 0;
if (verbose >= 2) if (verbose >= 2) {
perf_event_attr__fprintf(&evsel->attr, stderr); fprintf(stderr, "%.60s\n", graph_dotted_line);
fprintf(stderr, "perf_event_attr:\n");
perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
fprintf(stderr, "%.60s\n", graph_dotted_line);
}
for (cpu = 0; cpu < cpus->nr; cpu++) { for (cpu = 0; cpu < cpus->nr; cpu++) {
...@@ -1122,6 +1201,17 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, ...@@ -1122,6 +1201,17 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
goto try_fallback; goto try_fallback;
} }
set_rlimit = NO_CHANGE; set_rlimit = NO_CHANGE;
/*
* If we succeeded but had to kill clockid, fail and
* have perf_evsel__open_strerror() print us a nice
* error.
*/
if (perf_missing_features.clockid ||
perf_missing_features.clockid_wrong) {
err = -EINVAL;
goto out_close;
}
} }
} }
...@@ -1155,7 +1245,17 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, ...@@ -1155,7 +1245,17 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
if (err != -EINVAL || cpu > 0 || thread > 0) if (err != -EINVAL || cpu > 0 || thread > 0)
goto out_close; goto out_close;
if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) { /*
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
perf_missing_features.clockid_wrong = true;
goto fallback_missing_features;
} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
perf_missing_features.clockid = true;
goto fallback_missing_features;
} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
perf_missing_features.cloexec = true; perf_missing_features.cloexec = true;
goto fallback_missing_features; goto fallback_missing_features;
} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
...@@ -1956,62 +2056,9 @@ static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) ...@@ -1956,62 +2056,9 @@ static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
return ret; return ret;
} }
static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value) static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
if (value == 0)
return 0;
return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}
#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
struct bit_names {
int bit;
const char *name;
};
static int bits__fprintf(FILE *fp, const char *field, u64 value,
struct bit_names *bits, bool *first)
{
int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
bool first_bit = true;
do {
if (value & bits[i].bit) {
printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
first_bit = false;
}
} while (bits[++i].name != NULL);
return printed;
}
static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{ {
#define bit_name(n) { PERF_SAMPLE_##n, #n } return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
struct bit_names bits[] = {
bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR),
{ .name = NULL, }
};
#undef bit_name
return bits__fprintf(fp, "sample_type", value, bits, first);
}
static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
struct bit_names bits[] = {
bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
bit_name(ID), bit_name(GROUP),
{ .name = NULL, }
};
#undef bit_name
return bits__fprintf(fp, "read_format", value, bits, first);
} }
int perf_evsel__fprintf(struct perf_evsel *evsel, int perf_evsel__fprintf(struct perf_evsel *evsel,
...@@ -2040,47 +2087,13 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, ...@@ -2040,47 +2087,13 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
printed += fprintf(fp, "%s", perf_evsel__name(evsel)); printed += fprintf(fp, "%s", perf_evsel__name(evsel));
if (details->verbose || details->freq) { if (details->verbose) {
printed += perf_event_attr__fprintf(fp, &evsel->attr,
__print_attr__fprintf, &first);
} else if (details->freq) {
printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
(u64)evsel->attr.sample_freq); (u64)evsel->attr.sample_freq);
} }
if (details->verbose) {
if_print(type);
if_print(config);
if_print(config1);
if_print(config2);
if_print(size);
printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
if (evsel->attr.read_format)
printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
if_print(disabled);
if_print(inherit);
if_print(pinned);
if_print(exclusive);
if_print(exclude_user);
if_print(exclude_kernel);
if_print(exclude_hv);
if_print(exclude_idle);
if_print(mmap);
if_print(mmap2);
if_print(comm);
if_print(comm_exec);
if_print(freq);
if_print(inherit_stat);
if_print(enable_on_exec);
if_print(task);
if_print(watermark);
if_print(precise_ip);
if_print(mmap_data);
if_print(sample_id_all);
if_print(exclude_host);
if_print(exclude_guest);
if_print(__reserved_1);
if_print(wakeup_events);
if_print(bp_type);
if_print(branch_sample_type);
}
out: out:
fputc('\n', fp); fputc('\n', fp);
return ++printed; return ++printed;
...@@ -2158,6 +2171,12 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, ...@@ -2158,6 +2171,12 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"The PMU counters are busy/taken by another profiler.\n" "The PMU counters are busy/taken by another profiler.\n"
"We found oprofile daemon running, please stop it and try again."); "We found oprofile daemon running, please stop it and try again.");
break; break;
case EINVAL:
if (perf_missing_features.clockid)
return scnprintf(msg, size, "clockid feature not supported.");
if (perf_missing_features.clockid_wrong)
return scnprintf(msg, size, "wrong clockid (%d).", clockid);
break;
default: default:
break; break;
} }
......
...@@ -360,4 +360,10 @@ static inline bool has_branch_callstack(struct perf_evsel *evsel) ...@@ -360,4 +360,10 @@ static inline bool has_branch_callstack(struct perf_evsel *evsel)
{ {
return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
} }
typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv);
#endif /* __PERF_EVSEL_H */ #endif /* __PERF_EVSEL_H */
...@@ -1055,6 +1055,12 @@ read_event_desc(struct perf_header *ph, int fd) ...@@ -1055,6 +1055,12 @@ read_event_desc(struct perf_header *ph, int fd)
goto out; goto out;
} }
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __attribute__((unused)))
{
return fprintf(fp, ", %s = %s", name, val);
}
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{ {
struct perf_evsel *evsel, *events = read_event_desc(ph, fd); struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
...@@ -1069,26 +1075,6 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) ...@@ -1069,26 +1075,6 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
for (evsel = events; evsel->attr.size; evsel++) { for (evsel = events; evsel->attr.size; evsel++) {
fprintf(fp, "# event : name = %s, ", evsel->name); fprintf(fp, "# event : name = %s, ", evsel->name);
fprintf(fp, "type = %d, config = 0x%"PRIx64
", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
evsel->attr.type,
(u64)evsel->attr.config,
(u64)evsel->attr.config1,
(u64)evsel->attr.config2);
fprintf(fp, ", excl_usr = %d, excl_kern = %d",
evsel->attr.exclude_user,
evsel->attr.exclude_kernel);
fprintf(fp, ", excl_host = %d, excl_guest = %d",
evsel->attr.exclude_host,
evsel->attr.exclude_guest);
fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2);
fprintf(fp, ", attr_mmap = %d", evsel->attr.mmap);
fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data);
if (evsel->ids) { if (evsel->ids) {
fprintf(fp, ", id = {"); fprintf(fp, ", id = {");
for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
...@@ -1099,6 +1085,8 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) ...@@ -1099,6 +1085,8 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
fprintf(fp, " }"); fprintf(fp, " }");
} }
perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
fputc('\n', fp); fputc('\n', fp);
} }
......
...@@ -679,6 +679,9 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) ...@@ -679,6 +679,9 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
machine->vmlinux_maps[type]->unmap_ip = machine->vmlinux_maps[type]->unmap_ip =
identity__map_ip; identity__map_ip;
kmap = map__kmap(machine->vmlinux_maps[type]); kmap = map__kmap(machine->vmlinux_maps[type]);
if (!kmap)
return -1;
kmap->kmaps = &machine->kmaps; kmap->kmaps = &machine->kmaps;
map_groups__insert(&machine->kmaps, map_groups__insert(&machine->kmaps,
machine->vmlinux_maps[type]); machine->vmlinux_maps[type]);
...@@ -700,7 +703,7 @@ void machine__destroy_kernel_maps(struct machine *machine) ...@@ -700,7 +703,7 @@ void machine__destroy_kernel_maps(struct machine *machine)
kmap = map__kmap(machine->vmlinux_maps[type]); kmap = map__kmap(machine->vmlinux_maps[type]);
map_groups__remove(&machine->kmaps, map_groups__remove(&machine->kmaps,
machine->vmlinux_maps[type]); machine->vmlinux_maps[type]);
if (kmap->ref_reloc_sym) { if (kmap && kmap->ref_reloc_sym) {
/* /*
* ref_reloc_sym is shared among all maps, so free just * ref_reloc_sym is shared among all maps, so free just
* on one of them. * on one of them.
......
...@@ -778,3 +778,23 @@ struct map *maps__next(struct map *map) ...@@ -778,3 +778,23 @@ struct map *maps__next(struct map *map)
return rb_entry(next, struct map, rb_node); return rb_entry(next, struct map, rb_node);
return NULL; return NULL;
} }
struct kmap *map__kmap(struct map *map)
{
if (!map->dso || !map->dso->kernel) {
pr_err("Internal error: map__kmap with a non-kernel map\n");
return NULL;
}
return (struct kmap *)(map + 1);
}
struct map_groups *map__kmaps(struct map *map)
{
struct kmap *kmap = map__kmap(map);
if (!kmap || !kmap->kmaps) {
pr_err("Internal error: map__kmaps with a non-kernel map\n");
return NULL;
}
return kmap->kmaps;
}
...@@ -76,10 +76,8 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg) ...@@ -76,10 +76,8 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg)
void map_groups__put(struct map_groups *mg); void map_groups__put(struct map_groups *mg);
static inline struct kmap *map__kmap(struct map *map) struct kmap *map__kmap(struct map *map);
{ struct map_groups *map__kmaps(struct map *map);
return (struct kmap *)(map + 1);
}
static inline u64 map__map_ip(struct map *map, u64 ip) static inline u64 map__map_ip(struct map *map, u64 ip)
{ {
......
...@@ -709,6 +709,7 @@ struct event_modifier { ...@@ -709,6 +709,7 @@ struct event_modifier {
int eh; int eh;
int eH; int eH;
int eG; int eG;
int eI;
int precise; int precise;
int exclude_GH; int exclude_GH;
int sample_read; int sample_read;
...@@ -723,6 +724,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str, ...@@ -723,6 +724,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int eh = evsel ? evsel->attr.exclude_hv : 0; int eh = evsel ? evsel->attr.exclude_hv : 0;
int eH = evsel ? evsel->attr.exclude_host : 0; int eH = evsel ? evsel->attr.exclude_host : 0;
int eG = evsel ? evsel->attr.exclude_guest : 0; int eG = evsel ? evsel->attr.exclude_guest : 0;
int eI = evsel ? evsel->attr.exclude_idle : 0;
int precise = evsel ? evsel->attr.precise_ip : 0; int precise = evsel ? evsel->attr.precise_ip : 0;
int sample_read = 0; int sample_read = 0;
int pinned = evsel ? evsel->attr.pinned : 0; int pinned = evsel ? evsel->attr.pinned : 0;
...@@ -753,6 +755,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str, ...@@ -753,6 +755,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
if (!exclude_GH) if (!exclude_GH)
exclude_GH = eG = eH = 1; exclude_GH = eG = eH = 1;
eH = 0; eH = 0;
} else if (*str == 'I') {
eI = 1;
} else if (*str == 'p') { } else if (*str == 'p') {
precise++; precise++;
/* use of precise requires exclude_guest */ /* use of precise requires exclude_guest */
...@@ -786,6 +790,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str, ...@@ -786,6 +790,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->eh = eh; mod->eh = eh;
mod->eH = eH; mod->eH = eH;
mod->eG = eG; mod->eG = eG;
mod->eI = eI;
mod->precise = precise; mod->precise = precise;
mod->exclude_GH = exclude_GH; mod->exclude_GH = exclude_GH;
mod->sample_read = sample_read; mod->sample_read = sample_read;
...@@ -803,7 +808,7 @@ static int check_modifier(char *str) ...@@ -803,7 +808,7 @@ static int check_modifier(char *str)
char *p = str; char *p = str;
/* The sizeof includes 0 byte as well. */ /* The sizeof includes 0 byte as well. */
if (strlen(str) > (sizeof("ukhGHpppSD") - 1)) if (strlen(str) > (sizeof("ukhGHpppSDI") - 1))
return -1; return -1;
while (*p) { while (*p) {
...@@ -839,6 +844,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add) ...@@ -839,6 +844,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->attr.precise_ip = mod.precise; evsel->attr.precise_ip = mod.precise;
evsel->attr.exclude_host = mod.eH; evsel->attr.exclude_host = mod.eH;
evsel->attr.exclude_guest = mod.eG; evsel->attr.exclude_guest = mod.eG;
evsel->attr.exclude_idle = mod.eI;
evsel->exclude_GH = mod.exclude_GH; evsel->exclude_GH = mod.exclude_GH;
evsel->sample_read = mod.sample_read; evsel->sample_read = mod.sample_read;
......
...@@ -101,7 +101,7 @@ num_raw_hex [a-fA-F0-9]+ ...@@ -101,7 +101,7 @@ num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?]* name [a-zA-Z_*?][a-zA-Z0-9_*?]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
/* If you add a modifier you need to update check_modifier() */ /* If you add a modifier you need to update check_modifier() */
modifier_event [ukhpGHSD]+ modifier_event [ukhpGHSDI]+
modifier_bp [rwx]{1,3} modifier_bp [rwx]{1,3}
%% %%
......
...@@ -135,6 +135,8 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) ...@@ -135,6 +135,8 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
return NULL; return NULL;
kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]); kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
if (!kmap)
return NULL;
return kmap->ref_reloc_sym; return kmap->ref_reloc_sym;
} }
...@@ -320,7 +322,8 @@ static int find_alternative_probe_point(struct debuginfo *dinfo, ...@@ -320,7 +322,8 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
ret = -ENOENT; ret = -ENOENT;
goto out; goto out;
} }
pr_debug("Symbol %s address found : %lx\n", pp->function, address); pr_debug("Symbol %s address found : %" PRIx64 "\n",
pp->function, address);
ret = debuginfo__find_probe_point(dinfo, (unsigned long)address, ret = debuginfo__find_probe_point(dinfo, (unsigned long)address,
result); result);
......
...@@ -1466,6 +1466,9 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps, ...@@ -1466,6 +1466,9 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
for (i = 0; i < MAP__NR_TYPES; ++i) { for (i = 0; i < MAP__NR_TYPES; ++i) {
struct kmap *kmap = map__kmap(maps[i]); struct kmap *kmap = map__kmap(maps[i]);
if (!kmap)
continue;
kmap->ref_reloc_sym = ref; kmap->ref_reloc_sym = ref;
} }
......
...@@ -776,6 +776,7 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -776,6 +776,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
symbol_filter_t filter, int kmodule) symbol_filter_t filter, int kmodule)
{ {
struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
struct map *curr_map = map; struct map *curr_map = map;
struct dso *curr_dso = dso; struct dso *curr_dso = dso;
Elf_Data *symstrs, *secstrs; Elf_Data *symstrs, *secstrs;
...@@ -791,6 +792,9 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -791,6 +792,9 @@ int dso__load_sym(struct dso *dso, struct map *map,
int nr = 0; int nr = 0;
bool remap_kernel = false, adjust_kernel_syms = false; bool remap_kernel = false, adjust_kernel_syms = false;
if (kmap && !kmaps)
return -1;
dso->symtab_type = syms_ss->type; dso->symtab_type = syms_ss->type;
dso->is_64_bit = syms_ss->is_64_bit; dso->is_64_bit = syms_ss->is_64_bit;
dso->rel = syms_ss->ehdr.e_type == ET_REL; dso->rel = syms_ss->ehdr.e_type == ET_REL;
...@@ -958,8 +962,10 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -958,8 +962,10 @@ int dso__load_sym(struct dso *dso, struct map *map,
map->map_ip = map__map_ip; map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip; map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */ /* Ensure maps are correctly ordered */
map_groups__remove(kmap->kmaps, map); if (kmaps) {
map_groups__insert(kmap->kmaps, map); map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
}
} }
/* /*
...@@ -983,7 +989,7 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -983,7 +989,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
snprintf(dso_name, sizeof(dso_name), snprintf(dso_name, sizeof(dso_name),
"%s%s", dso->short_name, section_name); "%s%s", dso->short_name, section_name);
curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
if (curr_map == NULL) { if (curr_map == NULL) {
u64 start = sym.st_value; u64 start = sym.st_value;
...@@ -1013,7 +1019,7 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -1013,7 +1019,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
curr_map->unmap_ip = identity__map_ip; curr_map->unmap_ip = identity__map_ip;
} }
curr_dso->symtab_type = dso->symtab_type; curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmap->kmaps, curr_map); map_groups__insert(kmaps, curr_map);
/* /*
* The new DSO should go to the kernel DSOS * The new DSO should go to the kernel DSOS
*/ */
...@@ -1075,7 +1081,7 @@ int dso__load_sym(struct dso *dso, struct map *map, ...@@ -1075,7 +1081,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
* We need to fixup this here too because we create new * We need to fixup this here too because we create new
* maps here, for things like vsyscall sections. * maps here, for things like vsyscall sections.
*/ */
__map_groups__fixup_end(kmap->kmaps, map->type); __map_groups__fixup_end(kmaps, map->type);
} }
} }
err = nr; err = nr;
......
...@@ -630,13 +630,16 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename, ...@@ -630,13 +630,16 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
symbol_filter_t filter) symbol_filter_t filter)
{ {
struct map_groups *kmaps = map__kmap(map)->kmaps; struct map_groups *kmaps = map__kmaps(map);
struct map *curr_map; struct map *curr_map;
struct symbol *pos; struct symbol *pos;
int count = 0, moved = 0; int count = 0, moved = 0;
struct rb_root *root = &dso->symbols[map->type]; struct rb_root *root = &dso->symbols[map->type];
struct rb_node *next = rb_first(root); struct rb_node *next = rb_first(root);
if (!kmaps)
return -1;
while (next) { while (next) {
char *module; char *module;
...@@ -682,8 +685,8 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, ...@@ -682,8 +685,8 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta, static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
symbol_filter_t filter) symbol_filter_t filter)
{ {
struct map_groups *kmaps = map__kmap(map)->kmaps; struct map_groups *kmaps = map__kmaps(map);
struct machine *machine = kmaps->machine; struct machine *machine;
struct map *curr_map = map; struct map *curr_map = map;
struct symbol *pos; struct symbol *pos;
int count = 0, moved = 0; int count = 0, moved = 0;
...@@ -691,6 +694,11 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta, ...@@ -691,6 +694,11 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
struct rb_node *next = rb_first(root); struct rb_node *next = rb_first(root);
int kernel_range = 0; int kernel_range = 0;
if (!kmaps)
return -1;
machine = kmaps->machine;
while (next) { while (next) {
char *module; char *module;
...@@ -1025,9 +1033,12 @@ static bool filename_from_kallsyms_filename(char *filename, ...@@ -1025,9 +1033,12 @@ static bool filename_from_kallsyms_filename(char *filename,
static int validate_kcore_modules(const char *kallsyms_filename, static int validate_kcore_modules(const char *kallsyms_filename,
struct map *map) struct map *map)
{ {
struct map_groups *kmaps = map__kmap(map)->kmaps; struct map_groups *kmaps = map__kmaps(map);
char modules_filename[PATH_MAX]; char modules_filename[PATH_MAX];
if (!kmaps)
return -EINVAL;
if (!filename_from_kallsyms_filename(modules_filename, "modules", if (!filename_from_kallsyms_filename(modules_filename, "modules",
kallsyms_filename)) kallsyms_filename))
return -EINVAL; return -EINVAL;
...@@ -1043,6 +1054,9 @@ static int validate_kcore_addresses(const char *kallsyms_filename, ...@@ -1043,6 +1054,9 @@ static int validate_kcore_addresses(const char *kallsyms_filename,
{ {
struct kmap *kmap = map__kmap(map); struct kmap *kmap = map__kmap(map);
if (!kmap)
return -EINVAL;
if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
u64 start; u64 start;
...@@ -1081,8 +1095,8 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) ...@@ -1081,8 +1095,8 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
static int dso__load_kcore(struct dso *dso, struct map *map, static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename) const char *kallsyms_filename)
{ {
struct map_groups *kmaps = map__kmap(map)->kmaps; struct map_groups *kmaps = map__kmaps(map);
struct machine *machine = kmaps->machine; struct machine *machine;
struct kcore_mapfn_data md; struct kcore_mapfn_data md;
struct map *old_map, *new_map, *replacement_map = NULL; struct map *old_map, *new_map, *replacement_map = NULL;
bool is_64_bit; bool is_64_bit;
...@@ -1090,6 +1104,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map, ...@@ -1090,6 +1104,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
char kcore_filename[PATH_MAX]; char kcore_filename[PATH_MAX];
struct symbol *sym; struct symbol *sym;
if (!kmaps)
return -EINVAL;
machine = kmaps->machine;
/* This function requires that the map is the kernel map */ /* This function requires that the map is the kernel map */
if (map != machine->vmlinux_maps[map->type]) if (map != machine->vmlinux_maps[map->type])
return -EINVAL; return -EINVAL;
...@@ -1202,6 +1221,9 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) ...@@ -1202,6 +1221,9 @@ static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
struct kmap *kmap = map__kmap(map); struct kmap *kmap = map__kmap(map);
u64 addr; u64 addr;
if (!kmap)
return -1;
if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment