Commit 56495a8a authored by Namhyung Kim's avatar Namhyung Kim Committed by Arnaldo Carvalho de Melo

perf diff: Fix output ordering to honor next column

When perf diff prints output, it sorts the entries using the baseline field
by default, but entries that don't have a baseline value are not sorted
properly.  This patch makes such entries sorted by the values of the next column.

Before:

  # Baseline/0  Delta/1  Delta/2  Shared Object      Symbol
  # ..........  .......  .......  .................  ..........................................
  #
        32.75%   +0.28%   -0.83%  libc-2.20.so       [.] malloc
        31.50%   -0.74%   -0.23%  libc-2.20.so       [.] _int_free
        22.98%   +0.51%   +0.52%  libc-2.20.so       [.] _int_malloc
         5.70%   +0.28%   +0.30%  libc-2.20.so       [.] free
         4.38%   -0.21%   +0.25%  a.out              [.] main
         1.32%   -0.15%   +0.05%  a.out              [.] free@plt
         1.31%   +0.03%   -0.06%  a.out              [.] malloc@plt
         0.01%   -0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
         0.01%                    [kernel.kallsyms]  [k] scheduler_tick
         0.01%            -0.00%  [kernel.kallsyms]  [k] native_read_msr_safe
                          +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
                 +0.01%   +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
                          +0.01%  [kernel.kallsyms]  [k] intel_pstate_timer_func
                 +0.01%           [kernel.kallsyms]  [k] perf_adjust_freq_unthr_context.part.82
                 +0.01%           [kernel.kallsyms]  [k] read_tsc
                          +0.01%  [kernel.kallsyms]  [k] timekeeping_update.constprop.8

After:

  # Baseline/0  Delta/1  Delta/2  Shared Object      Symbol
  # ..........  .......  .......  .................  ..........................................
  #
        32.75%   +0.28%   -0.83%  libc-2.20.so       [.] malloc
        31.50%   -0.74%   -0.23%  libc-2.20.so       [.] _int_free
        22.98%   +0.51%   +0.52%  libc-2.20.so       [.] _int_malloc
         5.70%   +0.28%   +0.30%  libc-2.20.so       [.] free
         4.38%   -0.21%   +0.25%  a.out              [.] main
         1.32%   -0.15%   +0.05%  a.out              [.] free@plt
         1.31%   +0.03%   -0.06%  a.out              [.] malloc@plt
         0.01%   -0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
         0.01%                    [kernel.kallsyms]  [k] scheduler_tick
         0.01%            -0.00%  [kernel.kallsyms]  [k] native_read_msr_safe
                 +0.01%   +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
                 +0.01%           [kernel.kallsyms]  [k] read_tsc
                 +0.01%           [kernel.kallsyms]  [k] perf_adjust_freq_unthr_context.part.82
                          +0.01%  [kernel.kallsyms]  [k] intel_pstate_timer_func
                          +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
                          +0.01%  [kernel.kallsyms]  [k] timekeeping_update.constprop.8
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1420677949-6719-7-git-send-email-namhyung@kernel.org
[ Fixed up hist_entry__cmp_ method signatures, fallout from making previous cset buildable ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 87bbdf76
...@@ -456,11 +456,14 @@ static void hists__precompute(struct hists *hists) ...@@ -456,11 +456,14 @@ static void hists__precompute(struct hists *hists)
next = rb_first(root); next = rb_first(root);
while (next != NULL) { while (next != NULL) {
struct hist_entry *he, *pair; struct hist_entry *he, *pair;
struct data__file *d;
int i;
he = rb_entry(next, struct hist_entry, rb_node_in); he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in); next = rb_next(&he->rb_node_in);
pair = get_pair_data(he, &data__files[sort_compute]); data__for_each_file_new(i, d) {
pair = get_pair_data(he, d);
if (!pair) if (!pair)
continue; continue;
...@@ -478,6 +481,7 @@ static void hists__precompute(struct hists *hists) ...@@ -478,6 +481,7 @@ static void hists__precompute(struct hists *hists)
BUG_ON(1); BUG_ON(1);
} }
} }
}
} }
static int64_t cmp_doubles(double l, double r) static int64_t cmp_doubles(double l, double r)
...@@ -525,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, ...@@ -525,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
static int64_t static int64_t
hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
int c) int c, int sort_idx)
{ {
bool pairs_left = hist_entry__has_pairs(left); bool pairs_left = hist_entry__has_pairs(left);
bool pairs_right = hist_entry__has_pairs(right); bool pairs_right = hist_entry__has_pairs(right);
...@@ -537,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, ...@@ -537,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
if (!pairs_left || !pairs_right) if (!pairs_left || !pairs_right)
return pairs_left ? -1 : 1; return pairs_left ? -1 : 1;
p_left = get_pair_data(left, &data__files[sort_compute]); p_left = get_pair_data(left, &data__files[sort_idx]);
p_right = get_pair_data(right, &data__files[sort_compute]); p_right = get_pair_data(right, &data__files[sort_idx]);
if (!p_left && !p_right) if (!p_left && !p_right)
return 0; return 0;
...@@ -565,33 +569,36 @@ static int64_t ...@@ -565,33 +569,36 @@ static int64_t
hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused, hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right) struct hist_entry *left, struct hist_entry *right)
{ {
if (sort_compute)
return 0;
if (left->stat.period == right->stat.period) if (left->stat.period == right->stat.period)
return 0; return 0;
return left->stat.period > right->stat.period ? 1 : -1; return left->stat.period > right->stat.period ? 1 : -1;
} }
static int64_t static int64_t
hist_entry__cmp_delta(struct perf_hpp_fmt *fmt __maybe_unused, hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right) struct hist_entry *left, struct hist_entry *right)
{ {
return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
} }
static int64_t static int64_t
hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt __maybe_unused, hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right) struct hist_entry *left, struct hist_entry *right)
{ {
return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
} }
static int64_t static int64_t
hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt __maybe_unused, hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right) struct hist_entry *left, struct hist_entry *right)
{ {
return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
} }
static void hists__process(struct hists *hists) static void hists__process(struct hists *hists)
...@@ -599,9 +606,7 @@ static void hists__process(struct hists *hists) ...@@ -599,9 +606,7 @@ static void hists__process(struct hists *hists)
if (show_baseline_only) if (show_baseline_only)
hists__baseline_only(hists); hists__baseline_only(hists);
if (sort_compute)
hists__precompute(hists); hists__precompute(hists);
hists__output_resort(hists, NULL); hists__output_resort(hists, NULL);
hists__fprintf(hists, true, 0, 0, 0, stdout); hists__fprintf(hists, true, 0, 0, 0, stdout);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment