Commit 42fd623b authored by Ian Rogers, committed by Namhyung Kim

perf maps: Get map before returning in maps__find

Finding a map is done under a lock; returning the map without a
reference count means it can be removed without notice, causing
use-after-free bugs. Grab a reference count on the map within the
locked region and return it. Fix up the locations that need a
map__put afterwards.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: James Clark <james.clark@arm.com>
Cc: Vincent Whitchurch <vincent.whitchurch@axis.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Artem Savkov <asavkov@redhat.com>
Cc: bpf@vger.kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240210031746.4057262-3-irogers@google.com
parent 659ad349
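
For context on the hunks below, here is a minimal sketch of the caller contract this commit establishes: maps__find() now returns a map whose reference count was raised while the maps lock was held, so every caller owns a reference and must release it with map__put(). The helper name stack_bytes_above() is hypothetical; maps__find(), map__end() and map__put() are the perf-internal APIs visible in the diff, so this fragment only builds inside the perf tool tree.

/*
 * Sketch only: a typical caller after this commit. maps__find() hands back
 * a map with a reference held (or NULL), so the caller must map__put() it.
 * stack_bytes_above() is a hypothetical helper, not code from the commit.
 */
static u64 stack_bytes_above(struct maps *maps, u64 sp)
{
	struct map *map = maps__find(maps, sp);	/* reference taken under the lock */
	u64 size = 0;

	if (map != NULL) {
		size = map__end(map) - sp;	/* safe: we hold a reference */
		map__put(map);			/* drop the reference when done */
	}
	return size;
}
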
@@ -34,6 +34,7 @@ static int sample_ustack(struct perf_sample *sample,
}
stack_size = map__end(map) - sp;
+map__put(map);
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
......
@@ -151,10 +151,8 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map));
pair = maps__find(args->kallsyms.kmaps, mem_start);
-if (pair == NULL || map__priv(pair))
-return 0;
-if (map__start(pair) == mem_start) {
+if (pair != NULL && !map__priv(pair) && map__start(pair) == mem_start) {
struct dso *dso = map__dso(map);
if (!args->header_printed) {
@@ -170,6 +168,7 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
pr_info(" %s\n", dso->name);
map__set_priv(pair, 1);
}
+map__put(pair);
return 0;
}
......
@@ -63,6 +63,7 @@ static int machine__process_bpf_event_load(struct machine *machine,
dso->bpf_prog.id = id;
dso->bpf_prog.sub_id = i;
dso->bpf_prog.env = env;
+map__put(map);
}
}
return 0;
......
@@ -511,7 +511,7 @@ size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *ma
struct addr_location al;
addr_location__init(&al);
-al.map = map__get(maps__find(machine__kernel_maps(machine), tp->addr));
+al.map = maps__find(machine__kernel_maps(machine), tp->addr);
if (al.map && map__load(al.map) >= 0) {
al.addr = map__map_ip(al.map, tp->addr);
al.sym = map__find_symbol(al.map, al.addr);
@@ -641,7 +641,7 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return NULL;
}
al->maps = maps__get(maps);
-al->map = map__get(maps__find(maps, al->addr));
+al->map = maps__find(maps, al->addr);
if (al->map != NULL) {
/*
* Kernel maps might be changed when loading symbols so loading
......
@@ -896,7 +896,6 @@ static int machine__process_ksymbol_register(struct machine *machine,
struct symbol *sym;
struct dso *dso;
struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
-bool put_map = false;
int err = 0;
if (!map) {
@@ -913,12 +912,6 @@ static int machine__process_ksymbol_register(struct machine *machine,
err = -ENOMEM;
goto out;
}
-/*
- * The inserted map has a get on it, we need to put to release
- * the reference count here, but do it after all accesses are
- * done.
- */
-put_map = true;
if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
dso->binary_type = DSO_BINARY_TYPE__OOL;
dso->data.file_size = event->ksymbol.len;
@@ -952,8 +945,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
}
dso__insert_symbol(dso, sym);
out:
-if (put_map)
-map__put(map);
+map__put(map);
return err;
}
@@ -977,7 +969,7 @@ static int machine__process_ksymbol_unregister(struct machine *machine,
if (sym)
dso__delete_symbol(dso, sym);
}
+map__put(map);
return 0;
}
@@ -1005,11 +997,11 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event,
perf_event__fprintf_text_poke(event, machine, stdout);
if (!event->text_poke.new_len)
-return 0;
+goto out;
if (cpumode != PERF_RECORD_MISC_KERNEL) {
pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
-return 0;
+goto out;
}
if (dso) {
@@ -1032,7 +1024,8 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event,
pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
event->text_poke.addr);
}
+out:
+map__put(map);
return 0;
}
@@ -1300,9 +1293,10 @@ static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
return 0;
dest_map = maps__find(args->kmaps, map__pgoff(map));
-if (dest_map != map)
+if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
+map__put(dest_map);
args->found = true;
return 0;
}
......
@@ -506,15 +506,18 @@ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
struct map *map = maps__find(maps, addr);
+struct symbol *result = NULL;
/* Ensure map is loaded before using map->map_ip */
if (map != NULL && map__load(map) >= 0) {
-if (mapp != NULL)
-*mapp = map; // TODO: map_put on else path when find returns a get.
-return map__find_symbol(map, map__map_ip(map, addr));
-}
+if (mapp)
+*mapp = map;
-return NULL;
+result = map__find_symbol(map, map__map_ip(map, addr));
+if (!mapp)
+map__put(map);
+}
+return result;
}
struct maps__find_symbol_by_name_args {
@@ -558,7 +561,7 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
if (maps == NULL)
return -1;
-ams->ms.map = maps__find(maps, ams->addr); // TODO: map_get
+ams->ms.map = maps__find(maps, ams->addr);
if (ams->ms.map == NULL)
return -1;
}
@@ -868,7 +871,7 @@ struct map *maps__find(struct maps *maps, u64 ip)
sizeof(*mapp), map__addr_cmp);
if (mapp)
-result = *mapp; // map__get(*mapp);
+result = map__get(*mapp);
done = true;
}
up_read(maps__lock(maps));
......
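
The maps.c hunk just above is the heart of the fix: map__get() is called while the maps read lock (released by the up_read(maps__lock(maps)) line) is still held, so the map cannot be removed and freed between the lookup and the reference bump. Below is a self-contained analogue of that pattern using a pthread rwlock and an atomic reference count; the names obj, obj_get, obj_put and obj_table_find are illustrative stand-ins, not perf APIs.

/*
 * Analogue of the fixed maps__find(): the reference is taken while the
 * read lock is held, so the object cannot be freed between lookup and get.
 * Generic names; compile with -pthread (C11 or later).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	unsigned long start, end;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *table[16];	/* toy lookup table protected by table_lock */

static struct obj *obj_get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);	/* last reference dropped */
}

static struct obj *obj_table_find(unsigned long addr)
{
	struct obj *result = NULL;

	pthread_rwlock_rdlock(&table_lock);
	for (int i = 0; i < 16; i++) {
		if (table[i] && addr >= table[i]->start && addr < table[i]->end) {
			result = obj_get(table[i]);	/* ref taken under the lock */
			break;
		}
	}
	pthread_rwlock_unlock(&table_lock);
	return result;	/* caller owns a reference and must obj_put() it */
}

A caller then uses the returned object and drops its reference with obj_put(), which is exactly the role of the map__put() calls added throughout this commit.
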
@@ -757,7 +757,6 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
-struct map *curr_map;
struct symbol *pos;
int count = 0;
struct rb_root_cached old_root = dso->symbols;
@@ -770,6 +769,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
*root = RB_ROOT_CACHED;
while (next) {
+struct map *curr_map;
struct dso *curr_map_dso;
char *module;
@@ -796,6 +796,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
pos->end -= map__start(curr_map) - map__pgoff(curr_map);
symbols__insert(&curr_map_dso->symbols, pos);
++count;
+map__put(curr_map);
}
/* Symbols have been adjusted */
......