Commit d5b76bef authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "A kernel crash fix plus three tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix crash in perf_event_read()
  perf callchain: Reference count maps
  perf diff: Fix -o/--order option behavior (again)
  perf diff: Fix segfault on 'perf diff -o N' option
parents 4e4f74a7 451d24d1
...@@ -3487,14 +3487,15 @@ struct perf_read_data { ...@@ -3487,14 +3487,15 @@ struct perf_read_data {
int ret; int ret;
}; };
static int find_cpu_to_read(struct perf_event *event, int local_cpu) static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{ {
int event_cpu = event->oncpu;
u16 local_pkg, event_pkg; u16 local_pkg, event_pkg;
if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
event_pkg = topology_physical_package_id(event_cpu); int local_cpu = smp_processor_id();
local_pkg = topology_physical_package_id(local_cpu);
event_pkg = topology_physical_package_id(event_cpu);
local_pkg = topology_physical_package_id(local_cpu);
if (event_pkg == local_pkg) if (event_pkg == local_pkg)
return local_cpu; return local_cpu;
...@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event) ...@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
static int perf_event_read(struct perf_event *event, bool group) static int perf_event_read(struct perf_event *event, bool group)
{ {
int ret = 0, cpu_to_read, local_cpu; int event_cpu, ret = 0;
/* /*
* If event is enabled and currently active on a CPU, update the * If event is enabled and currently active on a CPU, update the
...@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group) ...@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
.ret = 0, .ret = 0,
}; };
local_cpu = get_cpu(); event_cpu = READ_ONCE(event->oncpu);
cpu_to_read = find_cpu_to_read(event, local_cpu); if ((unsigned)event_cpu >= nr_cpu_ids)
put_cpu(); return 0;
preempt_disable();
event_cpu = __perf_event_read_cpu(event, event_cpu);
/* /*
* Purposely ignore the smp_call_function_single() return * Purposely ignore the smp_call_function_single() return
* value. * value.
* *
* If event->oncpu isn't a valid CPU it means the event got * If event_cpu isn't a valid CPU it means the event got
* scheduled out and that will have updated the event count. * scheduled out and that will have updated the event count.
* *
* Therefore, either way, we'll have an up-to-date event count * Therefore, either way, we'll have an up-to-date event count
* after this. * after this.
*/ */
(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1); (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
preempt_enable();
ret = data.ret; ret = data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) { } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx; struct perf_event_context *ctx = event->ctx;
......
...@@ -1199,7 +1199,7 @@ static int ui_init(void) ...@@ -1199,7 +1199,7 @@ static int ui_init(void)
BUG_ON(1); BUG_ON(1);
} }
perf_hpp__register_sort_field(fmt); perf_hpp__prepend_sort_field(fmt);
return 0; return 0;
} }
......
...@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, ...@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
list_add_tail(&format->sort_list, &list->sorts); list_add_tail(&format->sort_list, &list->sorts);
} }
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format)
{
list_add(&format->sort_list, &list->sorts);
}
void perf_hpp__column_unregister(struct perf_hpp_fmt *format) void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{ {
list_del(&format->list); list_del(&format->list);
...@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list) ...@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
perf_hpp_list__for_each_sort_list(list, fmt) { perf_hpp_list__for_each_sort_list(list, fmt) {
struct perf_hpp_fmt *pos; struct perf_hpp_fmt *pos;
/* skip sort-only fields ("sort_compute" in perf diff) */
if (!fmt->entry && !fmt->color)
continue;
perf_hpp_list__for_each_format(list, pos) { perf_hpp_list__for_each_format(list, pos) {
if (fmt_equal(fmt, pos)) if (fmt_equal(fmt, pos))
goto next; goto next;
......
...@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor) ...@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
} }
call->ip = cursor_node->ip; call->ip = cursor_node->ip;
call->ms.sym = cursor_node->sym; call->ms.sym = cursor_node->sym;
call->ms.map = cursor_node->map; call->ms.map = map__get(cursor_node->map);
if (cursor_node->branch) { if (cursor_node->branch) {
call->branch_count = 1; call->branch_count = 1;
...@@ -477,6 +477,7 @@ add_child(struct callchain_node *parent, ...@@ -477,6 +477,7 @@ add_child(struct callchain_node *parent,
list_for_each_entry_safe(call, tmp, &new->val, list) { list_for_each_entry_safe(call, tmp, &new->val, list) {
list_del(&call->list); list_del(&call->list);
map__zput(call->ms.map);
free(call); free(call);
} }
free(new); free(new);
...@@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor, ...@@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
list->ms.map, list->ms.sym, list->ms.map, list->ms.sym,
false, NULL, 0, 0); false, NULL, 0, 0);
list_del(&list->list); list_del(&list->list);
map__zput(list->ms.map);
free(list); free(list);
} }
...@@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor, ...@@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
} }
node->ip = ip; node->ip = ip;
node->map = map; map__zput(node->map);
node->map = map__get(map);
node->sym = sym; node->sym = sym;
node->branch = branch; node->branch = branch;
node->nr_loop_iter = nr_loop_iter; node->nr_loop_iter = nr_loop_iter;
...@@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node) ...@@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node)
list_for_each_entry_safe(list, tmp, &node->parent_val, list) { list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
list_del(&list->list); list_del(&list->list);
map__zput(list->ms.map);
free(list); free(list);
} }
list_for_each_entry_safe(list, tmp, &node->val, list) { list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del(&list->list); list_del(&list->list);
map__zput(list->ms.map);
free(list); free(list);
} }
...@@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) ...@@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
goto out; goto out;
*new = *chain; *new = *chain;
new->has_children = false; new->has_children = false;
map__get(new->ms.map);
list_add_tail(&new->list, &head); list_add_tail(&new->list, &head);
} }
parent = parent->parent; parent = parent->parent;
...@@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) ...@@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
out: out:
list_for_each_entry_safe(chain, new, &head, list) { list_for_each_entry_safe(chain, new, &head, list) {
list_del(&chain->list); list_del(&chain->list);
map__zput(chain->ms.map);
free(chain); free(chain);
} }
return -ENOMEM; return -ENOMEM;
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include "event.h" #include "event.h"
#include "map.h"
#include "symbol.h" #include "symbol.h"
#define HELP_PAD "\t\t\t\t" #define HELP_PAD "\t\t\t\t"
...@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor, ...@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor,
*/ */
static inline void callchain_cursor_reset(struct callchain_cursor *cursor) static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{ {
struct callchain_cursor_node *node;
cursor->nr = 0; cursor->nr = 0;
cursor->last = &cursor->first; cursor->last = &cursor->first;
for (node = cursor->first; node != NULL; node = node->next)
map__zput(node->map);
} }
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
......
#include "util.h" #include "util.h"
#include "build-id.h" #include "build-id.h"
#include "hist.h" #include "hist.h"
#include "map.h"
#include "session.h" #include "session.h"
#include "sort.h" #include "sort.h"
#include "evlist.h" #include "evlist.h"
...@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, ...@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int max_stack_depth, void *arg) int max_stack_depth, void *arg)
{ {
int err, err2; int err, err2;
struct map *alm = NULL;
if (al && al->map)
alm = map__get(al->map);
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth); iter->evsel, al, max_stack_depth);
...@@ -1058,6 +1063,8 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, ...@@ -1058,6 +1063,8 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
if (!err) if (!err)
err = err2; err = err2;
map__put(alm);
return err; return err;
} }
......
...@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list, ...@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
struct perf_hpp_fmt *format); struct perf_hpp_fmt *format);
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format); struct perf_hpp_fmt *format);
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format);
static inline void perf_hpp__column_register(struct perf_hpp_fmt *format) static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
{ {
...@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format) ...@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
perf_hpp_list__register_sort_field(&perf_hpp_list, format); perf_hpp_list__register_sort_field(&perf_hpp_list, format);
} }
static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
{
perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
}
#define perf_hpp_list__for_each_format(_list, format) \ #define perf_hpp_list__for_each_format(_list, format) \
list_for_each_entry(format, &(_list)->fields, list) list_for_each_entry(format, &(_list)->fields, list)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment