Commit f1942b96 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core refactorings and improvements from Arnaldo Carvalho de Melo:

User visible changes:

  - Add hint for 'Too many events are opened.' error message (Jiri Olsa)

Infrastructure changes:

  - Protect accesses to map rbtrees with a lock and refcount struct map,
    reducing memory usage as maps that are no longer used get freed; a
    minimal sketch of the get/put pattern follows the commit message.
    The 'dso' struct is next in line. (Arnaldo Carvalho de Melo)

  - Annotation and branch-related option parsing refactorings to
    share code with upcoming patches (Andi Kleen)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 09a216ea f00898f4
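
The first infrastructure item above boils down to two pieces: a reference count on 'struct map' (the map__get() helper added to map.h below) and a pthread rwlock inside the new 'struct maps' wrapper. What follows is a minimal, self-contained sketch of the get/put half of that pattern; it uses C11 atomics instead of perf's own atomic_t, and the body of map__put() is an assumption about its semantics (drop a reference, free on the last put), not the implementation from this merge.

#include <stdatomic.h>
#include <stdlib.h>

struct map {
	atomic_int refcnt;
	/* start, end, dso, rb_node, ... elided */
};

/* Take a reference; mirrors the inline helper added to util/map.h. */
static inline struct map *map__get(struct map *map)
{
	if (map)
		atomic_fetch_add(&map->refcnt, 1);
	return map;
}

/* Drop a reference; the map is freed once the last user lets go. */
static void map__put(struct map *map)
{
	if (map && atomic_fetch_sub(&map->refcnt, 1) == 1)
		free(map);
}

Call sites that used to map__delete() unconditionally, such as put_target_map() in the probe code further down, switch to map__put() so a map only goes away when nobody else still holds a reference to it.
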
@@ -59,6 +59,10 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
(al->sym == NULL ||
strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
/* We're only interested in a symbol named sym_hist_filter */
/*
* FIXME: why isn't this done in the symbol_filter when loading
* the DSO?
*/
if (al->sym != NULL) {
rb_erase(&al->sym->rb_node,
&al->map->dso->symbols[al->map->type]);
@@ -28,6 +28,7 @@
#include "util/thread_map.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/parse-branch-options.h"
#include <unistd.h>
#include <sched.h>
@@ -751,94 +752,6 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
return status;
}
#define BRANCH_OPT(n, m) \
{ .name = n, .mode = (m) }
#define BRANCH_END { .name = NULL }
struct branch_mode {
const char *name;
int mode;
};
static const struct branch_mode branch_modes[] = {
BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
BRANCH_END
};
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
(PERF_SAMPLE_BRANCH_USER |\
PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
uint64_t *mode = (uint64_t *)opt->value;
const struct branch_mode *br;
char *s, *os = NULL, *p;
int ret = -1;
if (unset)
return 0;
/*
* cannot set it twice, -b + --branch-filter for instance
*/
if (*mode)
return -1;
/* str may be NULL in case no arg is passed to -b */
if (str) {
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
for (br = branch_modes; br->name; br++) {
if (!strcasecmp(s, br->name))
break;
}
if (!br->name) {
ui__warning("unknown branch filter %s,"
" check man page\n", s);
goto error;
}
*mode |= br->mode;
if (!p)
break;
s = p + 1;
}
}
ret = 0;
/* default to any branch */
if ((*mode & ~ONLY_PLM) == 0) {
*mode = PERF_SAMPLE_BRANCH_ANY;
}
error:
free(os);
return ret;
}
static void callchain_debug(void)
{
static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
@@ -26,7 +26,7 @@ int test__vmlinux_matches_kallsyms(void)
struct map *kallsyms_map, *vmlinux_map, *map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
struct rb_root *maps = &vmlinux.kmaps.maps[type];
struct maps *maps = &vmlinux.kmaps.maps[type];
u64 mem_start, mem_end;
/*
@@ -75,6 +75,7 @@ libperf-$(CONFIG_X86) += tsc.o
libperf-y += cloexec.o
libperf-y += thread-stack.o
libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-y += parse-branch-options.o
libperf-$(CONFIG_LIBELF) += symbol-elf.o
libperf-$(CONFIG_LIBELF) += probe-event.o
@@ -506,6 +506,17 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
return 0;
}
static struct annotation *symbol__get_annotation(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
if (symbol__alloc_hist(sym) < 0)
return NULL;
}
return notes;
}
static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
int evidx, u64 addr)
{
@@ -513,13 +524,9 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
if (sym == NULL)
return 0;
notes = symbol__annotation(sym);
if (notes->src == NULL) {
if (symbol__alloc_hist(sym) < 0)
return -ENOMEM;
}
notes = symbol__get_annotation(sym);
if (notes == NULL)
return -ENOMEM;
return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
}
@@ -331,7 +331,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
int rc = 0;
struct map *pos;
struct map_groups *kmaps = &machine->kmaps;
struct rb_root *maps = &kmaps->maps[MAP__FUNCTION];
struct maps *maps = &kmaps->maps[MAP__FUNCTION];
union perf_event *event = zalloc((sizeof(event->mmap) +
machine->id_hdr_size));
if (event == NULL) {
@@ -2149,7 +2149,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
case EMFILE:
return scnprintf(msg, size, "%s",
"Too many events are opened.\n"
"Try again after reducing the number of events.");
"Probably the maximum number of open file descriptors has been reached.\n"
"Hint: Try again after reducing the number of events.\n"
"Hint: Try increasing the limit with 'ulimit -n <limit>'");
case ENODEV:
if (target->cpu_list)
return scnprintf(msg, size, "%s",
@@ -759,7 +759,6 @@ void machine__destroy_kernel_maps(struct machine *machine)
kmap->ref_reloc_sym = NULL;
}
map__delete(machine->vmlinux_maps[type]);
machine->vmlinux_maps[type] = NULL;
}
}
@@ -1247,6 +1246,7 @@ int machine__process_mmap2_event(struct machine *machine,
thread__insert_map(thread, map);
thread__put(thread);
map__put(map);
return 0;
out_problem_map:
@@ -1297,6 +1297,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
thread__insert_map(thread, map);
thread__put(thread);
map__put(map);
return 0;
out_problem_map:
@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <linux/types.h>
@@ -51,6 +52,7 @@ struct map {
struct dso *dso;
struct map_groups *groups;
atomic_t refcnt;
};
struct kmap {
@@ -58,9 +60,14 @@ struct kmap {
struct map_groups *kmaps;
};
struct maps {
struct rb_root entries;
pthread_rwlock_t lock;
struct list_head removed_maps;
};
struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
struct maps maps[MAP__NR_TYPES];
struct machine *machine;
atomic_t refcnt;
};
@@ -144,6 +151,16 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
void map__delete(struct map *map);
struct map *map__clone(struct map *map);
static inline struct map *map__get(struct map *map)
{
if (map)
atomic_inc(&map->refcnt);
return map;
}
void map__put(struct map *map);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
@@ -162,10 +179,10 @@ void map__reloc_vmlinux(struct map *map);
size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
void maps__remove(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
struct map *maps__first(struct rb_root *maps);
void maps__insert(struct maps *maps, struct map *map);
void maps__remove(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr);
struct map *maps__first(struct maps *maps);
struct map *map__next(struct map *map);
void map_groups__init(struct map_groups *mg, struct machine *machine);
void map_groups__exit(struct map_groups *mg);
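
The other half of the changelog item is the lock: rbtree walks now happen under the pthread rwlock embedded in the new 'struct maps'. Below is a sketch of what a lookup looks like with that lock, assuming the usual address-range comparison and the headers already pulled in above (util/map.h, linux/rbtree.h); the actual maps__find() in util/map.c may differ in detail.

struct map *maps__find(struct maps *maps, u64 addr)
{
	struct rb_node *p;
	struct map *m = NULL;

	pthread_rwlock_rdlock(&maps->lock);

	p = maps->entries.rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map, rb_node);
		if (addr < m->start)
			p = p->rb_left;
		else if (addr >= m->end)
			p = p->rb_right;
		else
			goto out;	/* addr falls inside [start, end) */
		m = NULL;
	}
out:
	pthread_rwlock_unlock(&maps->lock);
	return m;
}

Writers such as maps__insert() and maps__remove() take the same lock in write mode, which is why their prototypes above now take a 'struct maps *' instead of a bare 'struct rb_root *'.
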
#include "perf.h"
#include "util/util.h"
#include "util/debug.h"
#include "util/parse-options.h"
#include "util/parse-branch-options.h"
#define BRANCH_OPT(n, m) \
{ .name = n, .mode = (m) }
#define BRANCH_END { .name = NULL }
struct branch_mode {
const char *name;
int mode;
};
static const struct branch_mode branch_modes[] = {
BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
BRANCH_END
};
int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
(PERF_SAMPLE_BRANCH_USER |\
PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
uint64_t *mode = (uint64_t *)opt->value;
const struct branch_mode *br;
char *s, *os = NULL, *p;
int ret = -1;
if (unset)
return 0;
/*
* cannot set it twice, -b + --branch-filter for instance
*/
if (*mode)
return -1;
/* str may be NULL in case no arg is passed to -b */
if (str) {
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
for (br = branch_modes; br->name; br++) {
if (!strcasecmp(s, br->name))
break;
}
if (!br->name) {
ui__warning("unknown branch filter %s,"
" check man page\n", s);
goto error;
}
*mode |= br->mode;
if (!p)
break;
s = p + 1;
}
}
ret = 0;
/* default to any branch */
if ((*mode & ~ONLY_PLM) == 0) {
*mode = PERF_SAMPLE_BRANCH_ANY;
}
error:
free(os);
return ret;
}
#ifndef _PERF_PARSE_BRANCH_OPTIONS_H
#define _PERF_PARSE_BRANCH_OPTIONS_H 1
struct option;
int parse_branch_stack(const struct option *opt, const char *str, int unset);
#endif /* _PERF_PARSE_BRANCH_OPTIONS_H */
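
With parse_branch_stack() moved out of builtin-record.c and exported through this header, other builtins can reuse it directly from their option tables. A sketch of what that wiring could look like follows; the option spelling mirrors perf record's -j/--branch-filter, but read it as illustrative rather than as the exact table entry from this merge.

#include <stdint.h>
#include "util/parse-options.h"
#include "util/parse-branch-options.h"

/* Bitmask of PERF_SAMPLE_BRANCH_* values filled in by the shared parser. */
static uint64_t branch_sample_type;

static const struct option example_options[] = {
	OPT_CALLBACK('j', "branch-filter", &branch_sample_type,
		     "branch filter mask",
		     "branch stack filter modes",
		     parse_branch_stack),
	OPT_END()
};

Because the callback reaches its target through opt->value, the same parser can fill whichever field a given command keeps its branch sample mask in.
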
@@ -163,7 +163,7 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
static struct map *kernel_get_module_map(const char *module)
{
struct map_groups *grp = &host_machine->kmaps;
struct rb_root *maps = &grp->maps[MAP__FUNCTION];
struct maps *maps = &grp->maps[MAP__FUNCTION];
struct map *pos;
/* A file path -- this is an offline module */
@@ -195,7 +195,7 @@ static void put_target_map(struct map *map, bool user)
{
if (map && user) {
/* Only the user map needs to be released */
map__delete(map);
map__put(map);
}
}
@@ -1791,7 +1791,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
out:
if (map && !is_kprobe) {
map__delete(map);
map__put(map);
}
return ret;
@@ -2884,7 +2884,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
end:
if (user) {
map__delete(map);
map__put(map);
}
exit_symbol_maps();
@@ -972,8 +972,10 @@ int dso__load_sym(struct dso *dso, struct map *map,
map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */
if (kmaps) {
map__get(map);
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
map__put(map);
}
}
@@ -202,12 +202,14 @@ void symbols__fixup_end(struct rb_root *symbols)
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
struct rb_root *maps = &mg->maps[type];
struct maps *maps = &mg->maps[type];
struct map *next, *curr;
pthread_rwlock_wrlock(&maps->lock);
curr = maps__first(maps);
if (curr == NULL)
return;
goto out_unlock;
for (next = map__next(curr); next; next = map__next(curr)) {
curr->end = next->start;
@@ -219,6 +221,9 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
* last map final address.
*/
curr->end = ~0ULL;
out_unlock:
pthread_rwlock_unlock(&maps->lock);
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
@@ -654,14 +659,14 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
curr_map = map_groups__find(kmaps, map->type, pos->start);
if (!curr_map || (filter && filter(curr_map, pos))) {
rb_erase(&pos->rb_node, root);
rb_erase_init(&pos->rb_node, root);
symbol__delete(pos);
} else {
pos->start -= curr_map->start - curr_map->pgoff;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
if (curr_map != map) {
rb_erase(&pos->rb_node, root);
rb_erase_init(&pos->rb_node, root);
symbols__insert(
&curr_map->dso->symbols[curr_map->type],
pos);
@@ -1168,20 +1173,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
/* Add new maps */
while (!list_empty(&md.maps)) {
new_map = list_entry(md.maps.next, struct map, node);
list_del(&new_map->node);
list_del_init(&new_map->node);
if (new_map == replacement_map) {
map->start = new_map->start;
map->end = new_map->end;
map->pgoff = new_map->pgoff;
map->map_ip = new_map->map_ip;
map->unmap_ip = new_map->unmap_ip;
map__delete(new_map);
/* Ensure maps are correctly ordered */
map__get(map);
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
map__put(map);
} else {
map_groups__insert(kmaps, new_map);
}
map__put(new_map);
}
/*
@@ -1206,8 +1214,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
out_err:
while (!list_empty(&md.maps)) {
map = list_entry(md.maps.next, struct map, node);
list_del(&map->node);
map__delete(map);
list_del_init(&map->node);
map__put(map);
}
close(fd);
return -EINVAL;
@@ -1520,15 +1528,21 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
struct map *map_groups__find_by_name(struct map_groups *mg,
enum map_type type, const char *name)
{
struct rb_root *maps = &mg->maps[type];
struct maps *maps = &mg->maps[type];
struct map *map;
pthread_rwlock_rdlock(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
if (map->dso && strcmp(map->dso->short_name, name) == 0)
return map;
goto out_unlock;
}
return NULL;
map = NULL;
out_unlock:
pthread_rwlock_unlock(&maps->lock);
return map;
}
int dso__load_vmlinux(struct dso *dso, struct map *map,