Commit 86141027 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo-4.18-20180523' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements from Arnaldo Carvalho de Melo:

- Create extra kernel maps to help in decoding samples in x86 PTI entry
  trampolines (Adrian Hunter)

- Copy x86 PTI entry trampoline sections in the kcore copy used for
  annotation and intel_pt CPU traces decoding (Adrian Hunter)

- Support 'perf annotate --group' for events that were not explicitly
  recorded as groups, showing multiple columns, one per event, just as
  with explicit event groups (those enclosed in {}) (Jin Yao)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 2996123e 22916fdb
@@ -4,6 +4,8 @@ libperf-y += pmu.o
libperf-y += kvm-stat.o
libperf-y += perf_regs.o
libperf-y += group.o
libperf-y += machine.o
libperf-y += event.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
#include "../../util/machine.h"
#include "../../util/tool.h"
#include "../../util/map.h"
#include "../../util/util.h"
#include "../../util/debug.h"
#if defined(__x86_64__)
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
int rc = 0;
struct map *pos;
struct map_groups *kmaps = &machine->kmaps;
struct maps *maps = &kmaps->maps;
union perf_event *event = zalloc(sizeof(event->mmap) +
machine->id_hdr_size);
if (!event) {
pr_debug("Not enough memory synthesizing mmap event "
"for extra kernel maps\n");
return -1;
}
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
struct kmap *kmap;
size_t size;
if (!__map__is_extra_kernel_map(pos))
continue;
kmap = map__kmap(pos);
size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
machine->id_hdr_size;
memset(event, 0, size);
event->mmap.header.type = PERF_RECORD_MMAP;
/*
* kernel uses 0 for user space maps, see kernel/perf_event.c
* __perf_event_mmap
*/
if (machine__is_host(machine))
event->header.misc = PERF_RECORD_MISC_KERNEL;
else
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
event->mmap.header.size = size;
event->mmap.start = pos->start;
event->mmap.len = pos->end - pos->start;
event->mmap.pgoff = pos->pgoff;
event->mmap.pid = machine->pid;
strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
if (perf_tool__process_synth_event(tool, event, machine,
process) != 0) {
rc = -1;
break;
}
}
free(event);
return rc;
}
#endif
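The x86 perf_event__synthesize_extra_kmaps() above overrides a __weak
default that this series adds to util/event.c (further down in this
diff). A minimal stand-alone sketch of that link-time override pattern,
using hypothetical names rather than perf's:

/* weak_demo.c - build with: gcc -o weak_demo weak_demo.c [x86_impl.c] */
#include <stdio.h>

/* Generic fallback: the linker keeps this definition only when no
 * strong definition of synth_extra_kmaps() is linked in. */
__attribute__((weak)) int synth_extra_kmaps(void)
{
	return 0;	/* nothing extra to synthesize on this arch */
}

int main(void)
{
	/* Prints 0 as-is; an object file providing a strong
	 * synth_extra_kmaps() silently replaces the fallback. */
	printf("synth_extra_kmaps() = %d\n", synth_extra_kmaps());
	return 0;
}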
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
#include <stdlib.h>
#include "../../util/machine.h"
#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../../util/sane_ctype.h"
#include <symbol/kallsyms.h>
#if defined(__x86_64__)
struct extra_kernel_map_info {
int cnt;
int max_cnt;
struct extra_kernel_map *maps;
bool get_entry_trampolines;
u64 entry_trampoline;
};
static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start,
u64 end, u64 pgoff, const char *name)
{
if (mi->cnt >= mi->max_cnt) {
void *buf;
size_t sz;
mi->max_cnt = mi->max_cnt ? mi->max_cnt * 2 : 32;
sz = sizeof(struct extra_kernel_map) * mi->max_cnt;
buf = realloc(mi->maps, sz);
if (!buf)
return -1;
mi->maps = buf;
}
mi->maps[mi->cnt].start = start;
mi->maps[mi->cnt].end = end;
mi->maps[mi->cnt].pgoff = pgoff;
strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN);
mi->cnt += 1;
return 0;
}
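add_extra_kernel_map() above grows its array geometrically: capacity
starts at 32 entries and doubles whenever it is exhausted, keeping the
total realloc() cost linear in the number of maps. The same idiom in a
self-contained sketch (names are hypothetical):

/* grow_demo.c - doubling-capacity array, as in add_extra_kernel_map() */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int *vals = NULL, cnt = 0, max_cnt = 0, i;

	for (i = 0; i < 100; i++) {
		if (cnt >= max_cnt) {
			int new_max = max_cnt ? max_cnt * 2 : 32;
			void *buf = realloc(vals, new_max * sizeof(*vals));

			if (!buf) {
				free(vals);
				return 1;
			}
			vals = buf;
			max_cnt = new_max;
		}
		vals[cnt++] = i;
	}
	printf("%d entries, capacity %d\n", cnt, max_cnt); /* 100, 128 */
	free(vals);
	return 0;
}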
static int find_extra_kernel_maps(void *arg, const char *name, char type,
u64 start)
{
struct extra_kernel_map_info *mi = arg;
if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL &&
!strcmp(name, "_entry_trampoline")) {
mi->entry_trampoline = start;
return 0;
}
if (is_entry_trampoline(name)) {
u64 end = start + page_size;
return add_extra_kernel_map(mi, start, end, 0, name);
}
return 0;
}
int machine__create_extra_kernel_maps(struct machine *machine,
struct dso *kernel)
{
struct extra_kernel_map_info mi = { .cnt = 0, };
char filename[PATH_MAX];
int ret;
int i;
machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps);
if (ret)
goto out_free;
if (!mi.entry_trampoline)
goto out_free;
for (i = 0; i < mi.cnt; i++) {
struct extra_kernel_map *xm = &mi.maps[i];
xm->pgoff = mi.entry_trampoline;
ret = machine__create_extra_kernel_map(machine, kernel, xm);
if (ret)
goto out_free;
}
machine->trampolines_mapped = mi.cnt;
out_free:
free(mi.maps);
return ret;
}
#endif
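machine__create_extra_kernel_maps() feeds find_extra_kernel_maps()
through perf's kallsyms__parse() callback interface. A rough
stand-alone equivalent using plain stdio (assuming an unrestricted
/proc/kallsyms; restricted systems show zero addresses, which is why
the code bails out via symbol__restricted_filename()):

/* kallsyms_demo.c - scan /proc/kallsyms for entry trampoline symbols */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

int main(void)
{
	char line[256], name[128], type;
	uint64_t addr;
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%" SCNx64 " %c %127s", &addr, &type, name) != 3)
			continue;
		/* the per-cpu trampoline maps carry exactly this name */
		if (!strcmp(name, "__entry_SYSCALL_64_trampoline"))
			printf("trampoline at 0x%" PRIx64 "\n", addr);
	}
	fclose(f);
	return 0;
}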
@@ -45,6 +45,7 @@ struct perf_annotate {
bool print_line;
bool skip_missing;
bool has_br_stack;
bool group_set;
const char *sym_hist_filter;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -508,6 +509,9 @@ int cmd_annotate(int argc, const char **argv)
"Don't shorten the displayed pathnames"),
OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
"Skip symbols that cannot be annotated"),
OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group,
&annotate.group_set,
"Show event group information together"),
OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
@@ -570,6 +574,9 @@ int cmd_annotate(int argc, const char **argv)
annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
HEADER_BRANCH_STACK);
if (annotate.group_set)
perf_evlist__force_leader(annotate.session->evlist);
ret = symbol__annotation_init();
if (ret < 0)
goto out_delete;
@@ -194,20 +194,11 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
return err;
}
/*
* Events in data file are not collected in groups, but we still want
* the group display. Set the artificial group and set the leader's
* forced_leader flag to notify the display code.
*/
static void setup_forced_leader(struct report *report,
struct perf_evlist *evlist)
{
if (report->group_set && !evlist->nr_groups) {
struct perf_evsel *leader = perf_evlist__first(evlist);
perf_evlist__set_leader(evlist);
leader->forced_leader = true;
}
if (report->group_set)
perf_evlist__force_leader(evlist);
}
static int process_feature_event(struct perf_tool *tool,
@@ -1965,6 +1965,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
int graph_dotted_len;
char buf[512];
filename = strdup(dso->long_name);
if (!filename)
@@ -1977,8 +1978,11 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
len = symbol__size(sym);
if (perf_evsel__is_group_event(evsel))
if (perf_evsel__is_group_event(evsel)) {
width *= evsel->nr_members;
perf_evsel__group_desc(evsel, buf, sizeof(buf));
evsel_name = buf;
}
graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
width, width, symbol_conf.show_total_period ? "Period" :
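In the symbol__annotate_printf() hunk above, the header width is
multiplied by the number of group members so that each event gets its
own sub-column. A tiny sketch of that variable-width printf trick:

/* width_demo.c - group events widen the annotation header column */
#include <stdio.h>

int main(void)
{
	int width = 8;		/* default column width */
	int nr_members = 2;	/* e.g. cycles + instructions */

	width *= nr_members;	/* one sub-column per group member */
	printf(" %-*.*s| Source code & Disassembly\n",
	       width, width, "Percent");
	return 0;
}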
@@ -106,11 +106,24 @@ static int perf_env__read_arch(struct perf_env *env)
return env->arch ? 0 : -ENOMEM;
}
static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
if (env->nr_cpus_avail == 0)
env->nr_cpus_avail = cpu__max_present_cpu();
return env->nr_cpus_avail ? 0 : -ENOENT;
}
const char *perf_env__raw_arch(struct perf_env *env)
{
return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}
int perf_env__nr_cpus_avail(struct perf_env *env)
{
return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}
void cpu_cache_level__free(struct cpu_cache_level *cache)
{
free(cache->type);
@@ -77,5 +77,6 @@ void cpu_cache_level__free(struct cpu_cache_level *cache);
const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
#endif /* __PERF_ENV_H */
@@ -88,7 +88,7 @@ static const char *perf_ns__name(unsigned int id)
return perf_ns__names[id];
}
static int perf_tool__process_synth_event(struct perf_tool *tool,
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
perf_event__handler_t process)
@@ -487,7 +487,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
size_t size;
if (__map__is_kernel(pos))
if (!__map__is_kmodule(pos))
continue;
size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
@@ -888,7 +888,14 @@ int kallsyms__get_function_start(const char *kallsyms_filename,
return 0;
}
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
perf_event__handler_t process __maybe_unused,
struct machine *machine __maybe_unused)
{
return 0;
}
static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
@@ -943,6 +950,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
return err;
}
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
int err;
err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
if (err < 0)
return err;
return perf_event__synthesize_extra_kmaps(tool, process, machine);
}
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
struct thread_map *threads,
perf_event__handler_t process,
@@ -750,6 +750,10 @@ int perf_event__process_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
perf_event__handler_t process);
int perf_event__process(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -796,6 +800,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
bool mmap_data,
unsigned int proc_map_timeout);
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine);
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
@@ -1795,3 +1795,18 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
return true;
}
/*
* Events in data file are not collected in groups, but we still want
* the group display. Set the artificial group and set the leader's
* forced_leader flag to notify the display code.
*/
void perf_evlist__force_leader(struct perf_evlist *evlist)
{
if (!evlist->nr_groups) {
struct perf_evsel *leader = perf_evlist__first(evlist);
perf_evlist__set_leader(evlist);
leader->forced_leader = true;
}
}
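perf_evlist__force_leader() is what lets 'perf report --group', and
with this series 'perf annotate --group', display events that were
recorded without {} grouping: the first event is nominated leader of
one artificial group. A toy illustration of the effect, using
hypothetical structures rather than perf's own types:

/* leader_demo.c - the "forced leader" idea in miniature */
#include <stdio.h>

struct evsel {
	const char *name;
	struct evsel *leader;
	int forced_leader;
};

int main(void)
{
	struct evsel ev[] = { { "cycles" }, { "instructions" } };
	int nr_groups = 0;	/* recorded without explicit {} groups */
	int i;

	if (!nr_groups) {	/* mirrors perf_evlist__force_leader() */
		for (i = 0; i < 2; i++)
			ev[i].leader = &ev[0];
		ev[0].forced_leader = 1;
	}
	for (i = 0; i < 2; i++)
		printf("%s -> leader %s\n", ev[i].name, ev[i].leader->name);
	return 0;
}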
@@ -309,4 +309,7 @@ struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
union perf_event *event);
bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
void perf_evlist__force_leader(struct perf_evlist *evlist);
#endif /* __PERF_EVLIST_H */
@@ -807,7 +807,7 @@ struct process_args {
u64 start;
};
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
void machine__get_kallsyms_filename(struct machine *machine, char *buf,
size_t bufsz)
{
if (machine__is_default_guest(machine))
@@ -851,6 +851,130 @@ static int machine__get_running_kernel_start(struct machine *machine,
return 0;
}
int machine__create_extra_kernel_map(struct machine *machine,
struct dso *kernel,
struct extra_kernel_map *xm)
{
struct kmap *kmap;
struct map *map;
map = map__new2(xm->start, kernel);
if (!map)
return -1;
map->end = xm->end;
map->pgoff = xm->pgoff;
kmap = map__kmap(map);
kmap->kmaps = &machine->kmaps;
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
map_groups__insert(&machine->kmaps, map);
pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
kmap->name, map->start, map->end);
map__put(map);
return 0;
}
static u64 find_entry_trampoline(struct dso *dso)
{
/* Duplicates are removed, so look up all aliases */
const char *syms[] = {
"_entry_trampoline",
"__entry_trampoline_start",
"entry_SYSCALL_64_trampoline",
};
struct symbol *sym = dso__first_symbol(dso);
unsigned int i;
for (; sym; sym = dso__next_symbol(sym)) {
if (sym->binding != STB_GLOBAL)
continue;
for (i = 0; i < ARRAY_SIZE(syms); i++) {
if (!strcmp(sym->name, syms[i]))
return sym->start;
}
}
return 0;
}
/*
* These values can be used for kernels that do not have symbols for the entry
* trampolines in kallsyms.
*/
#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
#define X86_64_ENTRY_TRAMPOLINE 0x6000
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct dso *kernel)
{
struct map_groups *kmaps = &machine->kmaps;
struct maps *maps = &kmaps->maps;
int nr_cpus_avail, cpu;
bool found = false;
struct map *map;
u64 pgoff;
/*
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
for (map = maps__first(maps); map; map = map__next(map)) {
struct kmap *kmap = __map__kmap(map);
struct map *dest_map;
if (!kmap || !is_entry_trampoline(kmap->name))
continue;
dest_map = map_groups__find(kmaps, map->pgoff);
if (dest_map != map)
map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
found = true;
}
if (found || machine->trampolines_mapped)
return 0;
pgoff = find_entry_trampoline(kernel);
if (!pgoff)
return 0;
nr_cpus_avail = machine__nr_cpus_avail(machine);
/* Add a 1 page map for each CPU's entry trampoline */
for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
cpu * X86_64_CPU_ENTRY_AREA_SIZE +
X86_64_ENTRY_TRAMPOLINE;
struct extra_kernel_map xm = {
.start = va,
.end = va + page_size,
.pgoff = pgoff,
};
strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
return -1;
}
machine->trampolines_mapped = nr_cpus_avail;
return 0;
}
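For kernels whose kallsyms has no trampoline symbols, the loop above
synthesizes one single-page map per CPU from the three constants. A
worked check of the addresses it produces:

/* va_demo.c - per-cpu trampoline addresses, as computed above */
#include <stdio.h>
#include <inttypes.h>

#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 2; cpu++) {
		uint64_t va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			      cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			      X86_64_ENTRY_TRAMPOLINE;

		/* cpu 0 -> 0xfffffe0000006000, cpu 1 -> 0xfffffe0000032000 */
		printf("cpu %d: 0x%" PRIx64 "\n", cpu, va);
	}
	return 0;
}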
int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
struct dso *kernel __maybe_unused)
{
return 0;
}
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
@@ -1206,9 +1330,8 @@ int machine__create_kernel_maps(struct machine *machine)
return -1;
ret = __machine__create_kernel_maps(machine, kernel);
dso__put(kernel);
if (ret < 0)
return -1;
goto out_put;
if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
if (machine__is_host(machine))
@@ -1223,7 +1346,8 @@ int machine__create_kernel_maps(struct machine *machine)
if (name &&
map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
machine__destroy_kernel_maps(machine);
return -1;
ret = -1;
goto out_put;
}
/* we have a real start address now, so re-order the kmaps */
@@ -1239,12 +1363,16 @@ int machine__create_kernel_maps(struct machine *machine)
map__put(map);
}
if (machine__create_extra_kernel_maps(machine, kernel))
pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
/* update end address of the kernel map using adjacent module address */
map = map__next(machine__kernel_map(machine));
if (map)
machine__set_kernel_mmap(machine, addr, map->start);
return 0;
out_put:
dso__put(kernel);
return ret;
}
static bool machine__uses_kcore(struct machine *machine)
@@ -1259,6 +1387,32 @@ static bool machine__uses_kcore(struct machine *machine)
return false;
}
static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
union perf_event *event)
{
return machine__is(machine, "x86_64") &&
is_entry_trampoline(event->mmap.filename);
}
static int machine__process_extra_kernel_map(struct machine *machine,
union perf_event *event)
{
struct map *kernel_map = machine__kernel_map(machine);
struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
struct extra_kernel_map xm = {
.start = event->mmap.start,
.end = event->mmap.start + event->mmap.len,
.pgoff = event->mmap.pgoff,
};
if (kernel == NULL)
return -1;
strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
return machine__create_extra_kernel_map(machine, kernel, &xm);
}
static int machine__process_kernel_mmap_event(struct machine *machine,
union perf_event *event)
{
@@ -1362,6 +1516,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
*/
dso__load(kernel, machine__kernel_map(machine));
}
} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
return machine__process_extra_kernel_map(machine, event);
}
return 0;
out_problem:
@@ -2305,6 +2461,11 @@ bool machine__is(struct machine *machine, const char *arch)
return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}
int machine__nr_cpus_avail(struct machine *machine)
{
return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}
int machine__get_kernel_start(struct machine *machine)
{
struct map *map = machine__kernel_map(machine);
@@ -56,6 +56,7 @@ struct machine {
void *priv;
u64 db_id;
};
bool trampolines_mapped;
};
static inline struct threads *machine__threads(struct machine *machine, pid_t tid)
@@ -189,6 +190,7 @@ static inline bool machine__is_host(struct machine *machine)
}
bool machine__is(struct machine *machine, const char *arch);
int machine__nr_cpus_avail(struct machine *machine);
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
@@ -267,4 +269,25 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
*/
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
void machine__get_kallsyms_filename(struct machine *machine, char *buf,
size_t bufsz);
int machine__create_extra_kernel_maps(struct machine *machine,
struct dso *kernel);
/* Kernel-space maps for symbols that are outside the main kernel map and module maps */
struct extra_kernel_map {
u64 start;
u64 end;
u64 pgoff;
char name[KMAP_NAME_LEN];
};
int machine__create_extra_kernel_map(struct machine *machine,
struct dso *kernel,
struct extra_kernel_map *xm);
int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct dso *kernel);
#endif /* __PERF_MACHINE_H */
@@ -252,6 +252,13 @@ bool __map__is_kernel(const struct map *map)
return machine__kernel_map(map->groups->machine) == map;
}
bool __map__is_extra_kernel_map(const struct map *map)
{
struct kmap *kmap = __map__kmap((struct map *)map);
return kmap && kmap->name[0];
}
bool map__has_symbols(const struct map *map)
{
return dso__has_symbols(map->dso);
@@ -846,15 +853,22 @@ struct map *map__next(struct map *map)
return NULL;
}
struct kmap *map__kmap(struct map *map)
struct kmap *__map__kmap(struct map *map)
{
if (!map->dso || !map->dso->kernel) {
pr_err("Internal error: map__kmap with a non-kernel map\n");
if (!map->dso || !map->dso->kernel)
return NULL;
}
return (struct kmap *)(map + 1);
}
struct kmap *map__kmap(struct map *map)
{
struct kmap *kmap = __map__kmap(map);
if (!kmap)
pr_err("Internal error: map__kmap with a non-kernel map\n");
return kmap;
}
struct map_groups *map__kmaps(struct map *map)
{
struct kmap *kmap = map__kmap(map);
@@ -8,6 +8,7 @@
#include <linux/rbtree.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <linux/types.h>
#include "rwsem.h"
@@ -46,9 +47,12 @@ struct map {
refcount_t refcnt;
};
#define KMAP_NAME_LEN 256
struct kmap {
struct ref_reloc_sym *ref_reloc_sym;
struct map_groups *kmaps;
char name[KMAP_NAME_LEN];
};
struct maps {
@@ -75,6 +79,7 @@ static inline struct map_groups *map_groups__get(struct map_groups *mg)
void map_groups__put(struct map_groups *mg);
struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
struct map_groups *map__kmaps(struct map *map);
@@ -231,12 +236,20 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
static inline bool __map__is_kmodule(const struct map *map)
{
return !__map__is_kernel(map);
return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
}
bool map__has_symbols(const struct map *map);
#define ENTRY_TRAMPOLINE_NAME "__entry_SYSCALL_64_trampoline"
static inline bool is_entry_trampoline(const char *name)
{
return !strcmp(name, ENTRY_TRAMPOLINE_NAME);
}
#endif /* __PERF_MAP_H */
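Taken together, the map.h changes split kernel-side maps three ways:
the main kernel map, "extra" kernel maps (a kmap with a non-empty
name, such as a PTI entry trampoline), and modules, which is why
__map__is_kmodule() now excludes both of the first two. A compact
sketch of that predicate order:

/* classify_demo.c - the three-way split behind __map__is_kmodule() */
#include <stdio.h>

static const char *classify(int is_kernel, const char *kmap_name)
{
	if (is_kernel)
		return "kernel";
	if (kmap_name && kmap_name[0])	/* __map__is_extra_kernel_map() */
		return "extra kernel map";
	return "module";		/* what __map__is_kmodule() keeps */
}

int main(void)
{
	printf("%s\n", classify(1, ""));
	printf("%s\n", classify(0, "__entry_SYSCALL_64_trampoline"));
	printf("%s\n", classify(0, ""));
	return 0;
}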
@@ -1386,8 +1386,16 @@ static off_t kcore__write(struct kcore *kcore)
struct phdr_data {
off_t offset;
off_t rel;
u64 addr;
u64 len;
struct list_head node;
struct phdr_data *remaps;
};
struct sym_data {
u64 addr;
struct list_head node;
};
struct kcore_copy_info {
@@ -1397,10 +1405,72 @@ struct kcore_copy_info {
u64 last_symbol;
u64 first_module;
u64 last_module_symbol;
struct phdr_data kernel_map;
struct phdr_data modules_map;
size_t phnum;
struct list_head phdrs;
struct list_head syms;
};
#define kcore_copy__for_each_phdr(k, p) \
list_for_each_entry((p), &(k)->phdrs, node)
static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
{
struct phdr_data *p = zalloc(sizeof(*p));
if (p) {
p->addr = addr;
p->len = len;
p->offset = offset;
}
return p;
}
static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
u64 addr, u64 len,
off_t offset)
{
struct phdr_data *p = phdr_data__new(addr, len, offset);
if (p)
list_add_tail(&p->node, &kci->phdrs);
return p;
}
static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
{
struct phdr_data *p, *tmp;
list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
list_del(&p->node);
free(p);
}
}
static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
u64 addr)
{
struct sym_data *s = zalloc(sizeof(*s));
if (s) {
s->addr = addr;
list_add_tail(&s->node, &kci->syms);
}
return s;
}
static void kcore_copy__free_syms(struct kcore_copy_info *kci)
{
struct sym_data *s, *tmp;
list_for_each_entry_safe(s, tmp, &kci->syms, node) {
list_del(&s->node);
free(s);
}
}
static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
u64 start)
{
@@ -1431,6 +1501,9 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
return 0;
}
if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
return -1;
return 0;
}
@@ -1480,27 +1553,39 @@ static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
return 0;
}
static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
u64 s, u64 e)
static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
u64 pgoff, u64 s, u64 e)
{
if (p->addr || s < start || s >= end)
return;
u64 len, offset;
if (s < start || s >= end)
return 0;
offset = (s - start) + pgoff;
len = e < end ? e - s : end - s;
p->addr = s;
p->offset = (s - start) + pgoff;
p->len = e < end ? e - s : end - s;
return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
}
static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
struct kcore_copy_info *kci = data;
u64 end = start + len;
struct sym_data *sdat;
kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
kci->etext);
if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
return -1;
if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
kci->last_module_symbol))
return -1;
kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
kci->last_module_symbol);
list_for_each_entry(sdat, &kci->syms, node) {
u64 s = round_down(sdat->addr, page_size);
if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
return -1;
}
return 0;
}
@@ -1513,6 +1598,64 @@ static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
return 0;
}
static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
{
struct phdr_data *p, *k = NULL;
u64 kend;
if (!kci->stext)
return;
/* Find phdr that corresponds to the kernel map (contains stext) */
kcore_copy__for_each_phdr(kci, p) {
u64 pend = p->addr + p->len - 1;
if (p->addr <= kci->stext && pend >= kci->stext) {
k = p;
break;
}
}
if (!k)
return;
kend = k->offset + k->len;
/* Find phdrs that remap the kernel */
kcore_copy__for_each_phdr(kci, p) {
u64 pend = p->offset + p->len;
if (p == k)
continue;
if (p->offset >= k->offset && pend <= kend)
p->remaps = k;
}
}
static void kcore_copy__layout(struct kcore_copy_info *kci)
{
struct phdr_data *p;
off_t rel = 0;
kcore_copy__find_remaps(kci);
kcore_copy__for_each_phdr(kci, p) {
if (!p->remaps) {
p->rel = rel;
rel += p->len;
}
kci->phnum += 1;
}
kcore_copy__for_each_phdr(kci, p) {
struct phdr_data *k = p->remaps;
if (k)
p->rel = p->offset - k->offset + k->rel;
}
}
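kcore_copy__layout() packs the non-remapped phdrs back to back in the
output file and then places each remapping phdr at the matching offset
inside its target's copy. A worked example with made-up numbers:

/* layout_demo.c - relative placement, as in kcore_copy__layout() */
#include <stdio.h>

struct phdr { long offset, len, rel; struct phdr *remaps; };

int main(void)
{
	struct phdr k = { 0x1000, 0x100000, 0, NULL };	/* kernel text */
	struct phdr m = { 0x200000, 0x8000, 0, NULL };	/* modules */
	struct phdr r = { 0x3000, 0x1000, 0, &k };	/* remaps the kernel */
	long rel = 0;

	/* first pass: only phdrs that are not remaps get fresh bytes */
	k.rel = rel; rel += k.len;
	m.rel = rel; rel += m.len;

	/* second pass: a remap points inside its target's copied bytes */
	r.rel = r.offset - r.remaps->offset + r.remaps->rel;

	/* prints k.rel=0 m.rel=0x100000 r.rel=0x2000 */
	printf("k.rel=%#lx m.rel=%#lx r.rel=%#lx\n", k.rel, m.rel, r.rel);
	return 0;
}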
static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
Elf *elf)
{
@@ -1548,7 +1691,12 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
if (kci->first_module && !kci->last_module_symbol)
return -1;
return kcore_copy__read_maps(kci, elf);
if (kcore_copy__read_maps(kci, elf))
return -1;
kcore_copy__layout(kci);
return 0;
}
static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
@@ -1671,12 +1819,15 @@ int kcore_copy(const char *from_dir, const char *to_dir)
{
struct kcore kcore;
struct kcore extract;
size_t count = 2;
int idx = 0, err = -1;
off_t offset = page_size, sz, modules_offset = 0;
off_t offset, sz;
struct kcore_copy_info kci = { .stext = 0, };
char kcore_filename[PATH_MAX];
char extract_filename[PATH_MAX];
struct phdr_data *p;
INIT_LIST_HEAD(&kci.phdrs);
INIT_LIST_HEAD(&kci.syms);
if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
return -1;
@@ -1696,20 +1847,17 @@ int kcore_copy(const char *from_dir, const char *to_dir)
if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
goto out_kcore_close;
if (!kci.modules_map.addr)
count -= 1;
if (kcore__copy_hdr(&kcore, &extract, count))
if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
goto out_extract_close;
if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
kci.kernel_map.len))
goto out_extract_close;
offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
offset = round_up(offset, page_size);
if (kci.modules_map.addr) {
modules_offset = offset + kci.kernel_map.len;
if (kcore__add_phdr(&extract, idx, modules_offset,
kci.modules_map.addr, kci.modules_map.len))
kcore_copy__for_each_phdr(&kci, p) {
off_t offs = p->rel + offset;
if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
goto out_extract_close;
}
@@ -1717,14 +1865,14 @@ int kcore_copy(const char *from_dir, const char *to_dir)
if (sz < 0 || sz > offset)
goto out_extract_close;
if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
kci.kernel_map.len))
goto out_extract_close;
kcore_copy__for_each_phdr(&kci, p) {
off_t offs = p->rel + offset;
if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
extract.fd, modules_offset,
kci.modules_map.len))
if (p->remaps)
continue;
if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
goto out_extract_close;
}
if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
goto out_extract_close;
@@ -1747,6 +1895,9 @@ int kcore_copy(const char *from_dir, const char *to_dir)
if (err)
kcore_copy__unlink(to_dir, "kallsyms");
kcore_copy__free_phdrs(&kci);
kcore_copy__free_syms(&kci);
return err;
}
@@ -737,12 +737,15 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
struct rb_root *root = &dso->symbols;
struct rb_node *next = rb_first(root);
int kernel_range = 0;
bool x86_64;
if (!kmaps)
return -1;
machine = kmaps->machine;
x86_64 = machine__is(machine, "x86_64");
while (next) {
char *module;
@@ -790,6 +793,16 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
*/
pos->start = curr_map->map_ip(curr_map, pos->start);
pos->end = curr_map->map_ip(curr_map, pos->end);
} else if (x86_64 && is_entry_trampoline(pos->name)) {
/*
* These symbols are not needed anymore since the
* trampoline maps refer to the text section and its
* symbols instead. Avoid having to deal with
* relocations, and the assumption that the first symbol
* is the start of kernel text, by simply removing the
* symbols at this point.
*/
goto discard_symbol;
} else if (curr_map != initial_map) {
char dso_name[PATH_MAX];
struct dso *ndso;
@@ -1017,7 +1030,7 @@ struct map *map_groups__first(struct map_groups *mg)
return maps__first(&mg->maps);
}
static int do_validate_kcore_modules(const char *filename, struct map *map,
static int do_validate_kcore_modules(const char *filename,
struct map_groups *kmaps)
{
struct rb_root modules = RB_ROOT;
@@ -1033,8 +1046,7 @@ static int do_validate_kcore_modules(const char *filename, struct map *map,
struct map *next = map_groups__next(old_map);
struct module_info *mi;
if (old_map == map || old_map->start == map->start) {
/* The kernel map */
if (!__map__is_kmodule(old_map)) {
old_map = next;
continue;
}
@@ -1091,7 +1103,7 @@ static int validate_kcore_modules(const char *kallsyms_filename,
kallsyms_filename))
return -EINVAL;
if (do_validate_kcore_modules(modules_filename, map, kmaps))
if (do_validate_kcore_modules(modules_filename, kmaps))
return -EINVAL;
return 0;
@@ -1146,6 +1158,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
struct map_groups *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *old_map, *new_map, *replacement_map = NULL;
struct machine *machine;
bool is_64_bit;
int err, fd;
char kcore_filename[PATH_MAX];
@@ -1154,6 +1167,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
if (!kmaps)
return -EINVAL;
machine = kmaps->machine;
/* This function requires that the map is the kernel map */
if (!__map__is_kernel(map))
return -EINVAL;
@@ -1197,6 +1212,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
map_groups__remove(kmaps, old_map);
old_map = next;
}
machine->trampolines_mapped = false;
/* Find the kernel map using the '_stext' symbol */
if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
@@ -1233,6 +1249,19 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
map__put(new_map);
}
if (machine__is(machine, "x86_64")) {
u64 addr;
/*
* If one of the corresponding symbols is there, assume the
* entry trampoline maps are too.
*/
if (!kallsyms__get_function_start(kallsyms_filename,
ENTRY_TRAMPOLINE_NAME,
&addr))
machine->trampolines_mapped = true;
}
/*
* Set the data type and long name so that kcore can be read via
* dso__data_read_addr().
@@ -1490,20 +1519,22 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
if (map->groups && map->groups->machine)
machine = map->groups->machine;
else
machine = NULL;
if (dso->kernel) {
if (dso->kernel == DSO_TYPE_KERNEL)
ret = dso__load_kernel_sym(dso, map);
else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
ret = dso__load_guest_kernel_sym(dso, map);
if (machine__is(machine, "x86_64"))
machine__map_x86_64_entry_trampolines(machine, dso);
goto out;
}
if (map->groups && map->groups->machine)
machine = map->groups->machine;
else
machine = NULL;
dso->adjust_symbols = 0;
if (perfmap) {