Commit f57a6414 authored by Leo Yan, committed by Arnaldo Carvalho de Melo

perf kvm: Use histograms list to replace cached list

The perf kvm tool defines its own cached list, managed with an RB tree;
histograms also provide an RB tree to manage data entries.  Since
histograms have now been introduced in the tool, the self-defined list
is no longer necessary and we can use the histograms list directly to
manage KVM events.
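
The mechanism behind this is the usual embedded-entry pattern: the
tool-specific struct embeds a generic hist_entry, the shared hists code
only ever sees the hist_entry, and the tool recovers its own struct
with container_of().  A minimal standalone sketch follows; the struct
layouts are simplified stand-ins for illustration, not perf's real
types:

/*
 * Sketch of the embedding pattern (simplified stand-in types):
 * the hists rbtree hands back a hist_entry pointer, and the tool
 * maps it back to the containing kvm_event with container_of().
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hist_entry {			/* stand-in for perf's hist_entry */
	int filtered;
};

struct kvm_event {			/* simplified stand-in */
	unsigned long total_time;
	struct hist_entry he;		/* embedded generic entry */
};

int main(void)
{
	struct kvm_event *event = calloc(1, sizeof(*event));
	struct hist_entry *he = &event->he;	/* what the hists code tracks */

	/* Tool side: map the generic entry back to its kvm_event. */
	struct kvm_event *back = container_of(he, struct kvm_event, he);

	printf("round trip ok: %s\n", back == event ? "yes" : "no");
	free(event);
	return 0;
}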

This patch switches to the histograms list to track KVM events, and it
invokes the common function hists__output_resort_cb() to sort the
results; this also gives us the flexibility to easily extend the
supported sort keys.
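
The resort-with-callback idea is that the sort pass invokes a per-entry
callback which may set he->filtered, and the print pass then skips
filtered entries.  Below is a standalone sketch of that flow with
stand-in types and a hypothetical output_resort_cb() helper; perf's
real hists__output_resort_cb() walks an rbtree rather than an array:

/*
 * Sketch of resort-with-filter-callback (stand-in types; the
 * helper name and array walk are illustrative only).
 */
#include <stdio.h>

struct hist_entry {
	unsigned long count;
	int filtered;
};

typedef int (*resort_cb_t)(struct hist_entry *he);

/* Hypothetical helper mirroring the callback step of the resort. */
static void output_resort_cb(struct hist_entry *entries, int nr,
			     resort_cb_t cb)
{
	int i;

	for (i = 0; i < nr; i++)
		cb(&entries[i]);	/* sorting itself omitted here */
}

static int filter_cb(struct hist_entry *he)
{
	he->filtered = (he->count == 0);	/* hide empty events */
	return 0;
}

int main(void)
{
	struct hist_entry entries[] = { { 5, 0 }, { 0, 0 }, { 2, 0 } };
	int i;

	output_resort_cb(entries, 3, filter_cb);
	for (i = 0; i < 3; i++) {
		if (entries[i].filtered)
			continue;	/* print pass skips filtered */
		printf("count=%lu\n", entries[i].count);
	}
	return 0;
}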

With the histograms list in place, the cached list is redundant, so
remove the code related to it.

Committer notes:

kvm_hists__reinit() is only used by functions enclosed in:

  #if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)

So enclose this new function in the same guard as well.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230315145112.186603-2-leo.yan@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 41f1138e
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -323,6 +323,12 @@ static int kvm_hists__init(void)
 	perf_hpp_list__init(&kvm_hists.list);
 	return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name");
 }
+
+static int kvm_hists__reinit(const char *output, const char *sort)
+{
+	perf_hpp__reset_output_field(&kvm_hists.list);
+	return kvm_hpp_list__parse(&kvm_hists.list, output, sort);
+}
 #endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
 
 static const char *get_filename_for_perf_kvm(void)
@@ -422,44 +428,37 @@ struct vcpu_event_record {
 	struct kvm_event *last_event;
 };
 
-static void init_kvm_event_record(struct perf_kvm_stat *kvm)
-{
-	unsigned int i;
-
-	for (i = 0; i < EVENTS_CACHE_SIZE; i++)
-		INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
-}
-
 #ifdef HAVE_TIMERFD_SUPPORT
-static void clear_events_cache_stats(struct list_head *kvm_events_cache)
+static void clear_events_cache_stats(void)
 {
-	struct list_head *head;
+	struct rb_root_cached *root;
+	struct rb_node *nd;
 	struct kvm_event *event;
-	unsigned int i;
-	int j;
-
-	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
-		head = &kvm_events_cache[i];
-		list_for_each_entry(event, head, hash_entry) {
-			/* reset stats for event */
-			event->total.time = 0;
-			init_stats(&event->total.stats);
-
-			for (j = 0; j < event->max_vcpu; ++j) {
-				event->vcpu[j].time = 0;
-				init_stats(&event->vcpu[j].stats);
-			}
+	int i;
+
+	if (hists__has(&kvm_hists.hists, need_collapse))
+		root = &kvm_hists.hists.entries_collapsed;
+	else
+		root = kvm_hists.hists.entries_in;
+
+	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
+		struct hist_entry *he;
+
+		he = rb_entry(nd, struct hist_entry, rb_node_in);
+		event = container_of(he, struct kvm_event, he);
+
+		/* reset stats for event */
+		event->total.time = 0;
+		init_stats(&event->total.stats);
+
+		for (i = 0; i < event->max_vcpu; ++i) {
+			event->vcpu[i].time = 0;
+			init_stats(&event->vcpu[i].stats);
 		}
 	}
 }
 #endif
 
-static int kvm_events_hash_fn(u64 key)
-{
-	return key & (EVENTS_CACHE_SIZE - 1);
-}
-
 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
 {
 	int old_max_vcpu = event->max_vcpu;
@@ -485,44 +484,64 @@ static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
 	return true;
 }
 
-static struct kvm_event *kvm_alloc_init_event(struct perf_kvm_stat *kvm,
-					      struct event_key *key,
-					      struct perf_sample *sample __maybe_unused)
+static void *kvm_he_zalloc(size_t size)
 {
-	struct kvm_event *event;
+	struct kvm_event *kvm_ev;
 
-	event = zalloc(sizeof(*event));
-	if (!event) {
-		pr_err("Not enough memory\n");
+	kvm_ev = zalloc(size + sizeof(*kvm_ev));
+	if (!kvm_ev)
 		return NULL;
-	}
 
-	event->perf_kvm = kvm;
-	event->key = *key;
-	init_stats(&event->total.stats);
-	return event;
+	init_stats(&kvm_ev->total.stats);
+	hists__inc_nr_samples(&kvm_hists.hists, 0);
+	return &kvm_ev->he;
+}
+
+static void kvm_he_free(void *he)
+{
+	struct kvm_event *kvm_ev;
+
+	free(((struct hist_entry *)he)->kvm_info);
+	kvm_ev = container_of(he, struct kvm_event, he);
+	free(kvm_ev);
 }
 
+static struct hist_entry_ops kvm_ev_entry_ops = {
+	.new	= kvm_he_zalloc,
+	.free	= kvm_he_free,
+};
+
 static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
 					       struct event_key *key,
 					       struct perf_sample *sample)
 {
 	struct kvm_event *event;
-	struct list_head *head;
+	struct hist_entry *he;
+	struct kvm_info *ki;
 
 	BUG_ON(key->key == INVALID_KEY);
 
-	head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
-	list_for_each_entry(event, head, hash_entry) {
-		if (event->key.key == key->key && event->key.info == key->info)
-			return event;
+	ki = zalloc(sizeof(*ki));
+	if (!ki) {
+		pr_err("Failed to allocate kvm info\n");
+		return NULL;
 	}
 
-	event = kvm_alloc_init_event(kvm, key, sample);
-	if (!event)
+	kvm->events_ops->decode_key(kvm, key, ki->name);
+	he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops,
+				  &kvm->al, NULL, NULL, NULL, ki, sample, true);
+	if (he == NULL) {
+		pr_err("Failed to allocate hist entry\n");
+		free(ki);
 		return NULL;
+	}
+
+	event = container_of(he, struct kvm_event, he);
+	if (!event->perf_kvm) {
+		event->perf_kvm = kvm;
+		event->key = *key;
+	}
 
-	list_add(&event->hash_entry, head);
 	return event;
 }
@@ -755,58 +774,32 @@ static bool select_key(struct perf_kvm_stat *kvm)
 	return false;
 }
 
-static void insert_to_result(struct rb_root *result, struct kvm_event *event,
-			     key_cmp_fun bigger, int vcpu)
-{
-	struct rb_node **rb = &result->rb_node;
-	struct rb_node *parent = NULL;
-	struct kvm_event *p;
-
-	while (*rb) {
-		p = container_of(*rb, struct kvm_event, rb);
-		parent = *rb;
-
-		if (bigger(event, p, vcpu) > 0)
-			rb = &(*rb)->rb_left;
-		else
-			rb = &(*rb)->rb_right;
-	}
-
-	rb_link_node(&event->rb, parent, rb);
-	rb_insert_color(&event->rb, result);
-}
-
 static bool event_is_valid(struct kvm_event *event, int vcpu)
 {
 	return !!get_event_count(event, vcpu);
 }
 
-static void sort_result(struct perf_kvm_stat *kvm)
+static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
 {
-	unsigned int i;
-	int vcpu = kvm->trace_vcpu;
 	struct kvm_event *event;
+	struct perf_kvm_stat *perf_kvm;
 
-	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
-		list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
-			if (event_is_valid(event, vcpu)) {
-				insert_to_result(&kvm->result, event,
-						 kvm->compare, vcpu);
-			}
-		}
-	}
+	event = container_of(he, struct kvm_event, he);
+	perf_kvm = event->perf_kvm;
+	if (!event_is_valid(event, perf_kvm->trace_vcpu))
+		he->filtered = 1;
+	else
+		he->filtered = 0;
+	return 0;
 }
 
-/* returns left most element of result, and erase it */
-static struct kvm_event *pop_from_result(struct rb_root *result)
+static void sort_result(struct perf_kvm_stat *kvm)
 {
-	struct rb_node *node = rb_first(result);
+	const char *output_columns = "ev_name,sample,time,max_t,min_t,mean_t";
 
-	if (!node)
-		return NULL;
-
-	rb_erase(node, result);
-	return container_of(node, struct kvm_event, rb);
+	kvm_hists__reinit(output_columns, kvm->sort_key);
+	hists__collapse_resort(&kvm_hists.hists, NULL);
+	hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb);
 }
 
 static void print_vcpu_info(struct perf_kvm_stat *kvm)
@@ -849,6 +842,7 @@ static void print_result(struct perf_kvm_stat *kvm)
 	char decode[KVM_EVENT_NAME_LEN];
 	struct kvm_event *event;
 	int vcpu = kvm->trace_vcpu;
+	struct rb_node *nd;
 
 	if (kvm->live) {
 		puts(CONSOLE_CLEAR);
@@ -867,9 +861,15 @@ static void print_result(struct perf_kvm_stat *kvm)
 	pr_info("%16s ", "Avg time");
 	pr_info("\n\n");
 
-	while ((event = pop_from_result(&kvm->result))) {
+	for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) {
+		struct hist_entry *he;
 		u64 ecount, etime, max, min;
 
+		he = rb_entry(nd, struct hist_entry, rb_node);
+		if (he->filtered)
+			continue;
+
+		event = container_of(he, struct kvm_event, he);
 		ecount = get_event_count(event, vcpu);
 		etime = get_event_time(event, vcpu);
 		max = get_event_max(event, vcpu);
@@ -1146,8 +1146,11 @@ static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
 	sort_result(kvm);
 	print_result(kvm);
 
+	/* Reset sort list to "ev_name" */
+	kvm_hists__reinit(NULL, "ev_name");
+
 	/* reset counts */
-	clear_events_cache_stats(kvm->kvm_events_cache);
+	clear_events_cache_stats();
 	kvm->total_count = 0;
 	kvm->total_time = 0;
 	kvm->lost_events = 0;
@@ -1203,7 +1206,6 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
 	}
 
 	set_term_quiet_input(&save);
-	init_kvm_event_record(kvm);
 	kvm_hists__init();
@@ -1399,7 +1401,6 @@ static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
 	if (!register_kvm_events_ops(kvm))
 		goto exit;
 
-	init_kvm_event_record(kvm);
 	setup_pager();
 	kvm_hists__init();
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -36,7 +36,6 @@ struct perf_kvm_stat;
 
 struct kvm_event {
 	struct list_head hash_entry;
-	struct rb_node rb;
 	struct perf_kvm_stat *perf_kvm;
 	struct event_key key;
@@ -81,9 +80,6 @@ struct exit_reasons_table {
 	const char *reason;
 };
 
-#define EVENTS_BITS		12
-#define EVENTS_CACHE_SIZE	(1UL << EVENTS_BITS)
-
 struct perf_kvm_stat {
 	struct perf_tool tool;
 	struct record_opts opts;
@@ -103,7 +99,6 @@ struct perf_kvm_stat {
 	struct kvm_events_ops *events_ops;
 	key_cmp_fun compare;
-	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
 
 	u64 total_time;
 	u64 total_count;
@@ -112,8 +107,6 @@ struct perf_kvm_stat {
 	struct intlist *pid_list;
 
-	struct rb_root result;
-
 	int timerfd;
 	unsigned int display_time;
 	bool live;