perf evsel: Steal the counter reading routines from stat

Making them hopefully generic enough to be used in 'perf test'; we'll see.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 70d544d0
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -93,12 +93,6 @@ static const char *cpu_list;
 static const char	*csv_sep	= NULL;
 static bool		csv_output	= false;

-struct cpu_counts {
-	u64 val;
-	u64 ena;
-	u64 run;
-};
-
 static volatile int done = 0;

 struct stats
@@ -108,15 +102,11 @@ struct stats
 struct perf_stat {
 	struct stats	  res_stats[3];
-	int		  scaled;
-	struct cpu_counts cpu_counts[];
 };

-static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel, int ncpus)
+static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
-	size_t priv_size = (sizeof(struct perf_stat) +
-			    (ncpus * sizeof(struct cpu_counts)));
-	evsel->priv = zalloc(priv_size);
+	evsel->priv = zalloc(sizeof(struct perf_stat));
 	return evsel->priv == NULL ? -ENOMEM : 0;
 }
@@ -238,52 +228,14 @@ static inline int nsec_counter(struct perf_evsel *evsel)
  * Read out the results of a single counter:
  * aggregate counts across CPUs in system-wide mode
  */
-static void read_counter_aggr(struct perf_evsel *counter)
+static int read_counter_aggr(struct perf_evsel *counter)
 {
 	struct perf_stat *ps = counter->priv;
-	u64 count[3], single_count[3];
-	int cpu;
-	size_t res, nv;
-	int scaled;
-	int i, thread;
+	u64 *count = counter->counts->aggr.values;
+	int i;

-	count[0] = count[1] = count[2] = 0;
-
-	nv = scale ? 3 : 1;
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		for (thread = 0; thread < thread_num; thread++) {
-			if (FD(counter, cpu, thread) < 0)
-				continue;
-
-			res = read(FD(counter, cpu, thread),
-				   single_count, nv * sizeof(u64));
-			assert(res == nv * sizeof(u64));
-
-			close(FD(counter, cpu, thread));
-			FD(counter, cpu, thread) = -1;
-
-			count[0] += single_count[0];
-			if (scale) {
-				count[1] += single_count[1];
-				count[2] += single_count[2];
-			}
-		}
-	}
-
-	scaled = 0;
-	if (scale) {
-		if (count[2] == 0) {
-			ps->scaled = -1;
-			count[0] = 0;
-			return;
-		}
-
-		if (count[2] < count[1]) {
-			ps->scaled = 1;
-			count[0] = (unsigned long long)
-				((double)count[0] * count[1] / count[2] + 0.5);
-		}
-	}
+	if (__perf_evsel__read(counter, nr_cpus, thread_num, scale) < 0)
+		return -1;

 	for (i = 0; i < 3; i++)
 		update_stats(&ps->res_stats[i], count[i]);
@@ -302,46 +254,24 @@ static void read_counter_aggr(struct perf_evsel *counter)
 		update_stats(&runtime_cycles_stats[0], count[0]);
 	if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
 		update_stats(&runtime_branches_stats[0], count[0]);
+
+	return 0;
 }

 /*
  * Read out the results of a single counter:
  * do not aggregate counts across CPUs in system-wide mode
  */
-static void read_counter(struct perf_evsel *counter)
+static int read_counter(struct perf_evsel *counter)
 {
-	struct cpu_counts *cpu_counts = counter->priv;
-	u64 count[3];
+	u64 *count;
 	int cpu;
-	size_t res, nv;
-
-	count[0] = count[1] = count[2] = 0;
-
-	nv = scale ? 3 : 1;

 	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
+			return -1;

-		if (FD(counter, cpu, 0) < 0)
-			continue;
-
-		res = read(FD(counter, cpu, 0), count, nv * sizeof(u64));
-		assert(res == nv * sizeof(u64));
-
-		close(FD(counter, cpu, 0));
-		FD(counter, cpu, 0) = -1;
-
-		if (scale) {
-			if (count[2] == 0) {
-				count[0] = 0;
-			} else if (count[2] < count[1]) {
-				count[0] = (unsigned long long)
-					((double)count[0] * count[1] / count[2] + 0.5);
-			}
-		}
-		cpu_counts[cpu].val = count[0]; /* scaled count */
-		cpu_counts[cpu].ena = count[1];
-		cpu_counts[cpu].run = count[2];
+		count = counter->counts->cpu[cpu].values;

 		if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
 			update_stats(&runtime_nsecs_stats[cpu], count[0]);
@@ -350,6 +280,8 @@ static void read_counter(struct perf_evsel *counter)
 		if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
 			update_stats(&runtime_branches_stats[cpu], count[0]);
 	}
+
+	return 0;
 }

 static int run_perf_stat(int argc __used, const char **argv)
@@ -449,12 +381,17 @@ static int run_perf_stat(int argc __used, const char **argv)
 	update_stats(&walltime_nsecs_stats, t1 - t0);

 	if (no_aggr) {
-		list_for_each_entry(counter, &evsel_list, node)
+		list_for_each_entry(counter, &evsel_list, node) {
 			read_counter(counter);
+			perf_evsel__close_fd(counter, nr_cpus, 1);
+		}
 	} else {
-		list_for_each_entry(counter, &evsel_list, node)
+		list_for_each_entry(counter, &evsel_list, node) {
 			read_counter_aggr(counter);
+			perf_evsel__close_fd(counter, nr_cpus, thread_num);
+		}
 	}

 	return WEXITSTATUS(status);
 }
@@ -550,7 +487,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
 {
 	struct perf_stat *ps = counter->priv;
 	double avg = avg_stats(&ps->res_stats[0]);
-	int scaled = ps->scaled;
+	int scaled = counter->counts->scaled;

 	if (scaled == -1) {
 		fprintf(stderr, "%*s%s%-24s\n",
@@ -590,14 +527,13 @@ static void print_counter_aggr(struct perf_evsel *counter)
  */
 static void print_counter(struct perf_evsel *counter)
 {
-	struct perf_stat *ps = counter->priv;
 	u64 ena, run, val;
 	int cpu;

 	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		val = ps->cpu_counts[cpu].val;
-		ena = ps->cpu_counts[cpu].ena;
-		run = ps->cpu_counts[cpu].run;
+		val = counter->counts->cpu[cpu].val;
+		ena = counter->counts->cpu[cpu].ena;
+		run = counter->counts->cpu[cpu].run;
 		if (run == 0 || ena == 0) {
 			fprintf(stderr, "CPU%*d%s%*s%s%-24s",
 				csv_output ? 0 : -4,
@@ -818,7 +754,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 	}

 	list_for_each_entry(pos, &evsel_list, node) {
-		if (perf_evsel__alloc_stat_priv(pos, nr_cpus) < 0 ||
+		if (perf_evsel__alloc_stat_priv(pos) < 0 ||
+		    perf_evsel__alloc_counts(pos, nr_cpus) < 0 ||
 		    perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0)
			goto out_free_fd;
 	}
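For reference, the three u64 values these read routines consume come straight from the perf_event_open() read format: with PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING set, a read(2) on the counter fd returns {value, time_enabled, time_running}, which is what the open-coded loops removed above parsed by hand and what the new evsel helpers parse now. A standalone sketch of that ABI, outside the perf tree (Linux only; a software event, so no special PMU access is needed):

```c
/*
 * Standalone illustration (not perf-tree code): read the
 * {value, time_enabled, time_running} triple from a counter fd,
 * the same layout the routines above consume.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count[3];      /* value, time_enabled, time_running */
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type        = PERF_TYPE_SOFTWARE;
        attr.config      = PERF_COUNT_SW_TASK_CLOCK;
        attr.size        = sizeof(attr);
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING;

        /* monitor the current process on any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        for (volatile int i = 0; i < 10000000; i++)
                ;       /* give the counter something to count */

        if (read(fd, count, sizeof(count)) != (ssize_t)sizeof(count)) {
                perror("read");
                return 1;
        }

        printf("val=%" PRIu64 " ena=%" PRIu64 " run=%" PRIu64 "\n",
               count[0], count[1], count[2]);
        close(fd);
        return 0;
}
```

With only the value requested (read_format = 0), read(2) returns a single u64, which is why the helpers below size the read as nv = scale ? 3 : 1.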
#include "evsel.h" #include "evsel.h"
#include "util.h" #include "util.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx) struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
{ {
struct perf_evsel *evsel = zalloc(sizeof(*evsel)); struct perf_evsel *evsel = zalloc(sizeof(*evsel));
...@@ -21,15 +23,101 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) ...@@ -21,15 +23,101 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM; return evsel->fd != NULL ? 0 : -ENOMEM;
} }
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
evsel->counts = zalloc((sizeof(*evsel->counts) +
(ncpus * sizeof(struct perf_counts_values))));
return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel) void perf_evsel__free_fd(struct perf_evsel *evsel)
{ {
xyarray__delete(evsel->fd); xyarray__delete(evsel->fd);
evsel->fd = NULL; evsel->fd = NULL;
} }
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++)
for (thread = 0; thread < nthreads; ++thread) {
close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
}
}
void perf_evsel__delete(struct perf_evsel *evsel) void perf_evsel__delete(struct perf_evsel *evsel)
{ {
assert(list_empty(&evsel->node)); assert(list_empty(&evsel->node));
xyarray__delete(evsel->fd); xyarray__delete(evsel->fd);
free(evsel); free(evsel);
} }
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
int cpu, int thread, bool scale)
{
struct perf_counts_values count;
size_t nv = scale ? 3 : 1;
if (FD(evsel, cpu, thread) < 0)
return -EINVAL;
if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
return -errno;
if (scale) {
if (count.run == 0)
count.val = 0;
else if (count.run < count.ena)
count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
} else
count.ena = count.run = 0;
evsel->counts->cpu[cpu] = count;
return 0;
}
int __perf_evsel__read(struct perf_evsel *evsel,
int ncpus, int nthreads, bool scale)
{
size_t nv = scale ? 3 : 1;
int cpu, thread;
struct perf_counts_values *aggr = &evsel->counts->aggr, count;
aggr->val = 0;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
if (FD(evsel, cpu, thread) < 0)
continue;
if (readn(FD(evsel, cpu, thread),
&count, nv * sizeof(u64)) < 0)
return -errno;
aggr->val += count.val;
if (scale) {
aggr->ena += count.ena;
aggr->run += count.run;
}
}
}
evsel->counts->scaled = 0;
if (scale) {
if (aggr->run == 0) {
evsel->counts->scaled = -1;
aggr->val = 0;
return 0;
}
if (aggr->run < aggr->ena) {
evsel->counts->scaled = 1;
aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
}
} else
aggr->ena = aggr->run = 0;
return 0;
}
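The scaling branch in __perf_evsel__read() and __perf_evsel__read_on_cpu() compensates for counter multiplexing: when time_running is shorter than time_enabled, the raw count is extrapolated by ena/run (and scaled is set to 1); a counter that never ran reports 0 with scaled = -1. A minimal standalone illustration of that arithmetic, with made-up numbers:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Extrapolate a multiplexed counter the same way __perf_evsel__read() does. */
static uint64_t scale_count(uint64_t val, uint64_t ena, uint64_t run)
{
        if (run == 0)           /* never scheduled in: report 0 (scaled = -1) */
                return 0;
        if (run < ena)          /* multiplexed: scale up by enabled/running time */
                return (uint64_t)((double)val * ena / run + 0.5);
        return val;             /* ran the whole time: raw value is exact */
}

int main(void)
{
        /*
         * Made-up numbers: 1,000,000 events counted while the counter was
         * running for 25ms out of 100ms enabled -> estimate ~4,000,000.
         */
        printf("%" PRIu64 "\n", scale_count(1000000, 100000000, 25000000));
        return 0;
}
```

So a counter that was scheduled in for only a quarter of its enabled time reports roughly four times its raw value.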
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -2,15 +2,34 @@
 #define __PERF_EVSEL_H 1

 #include <linux/list.h>
+#include <stdbool.h>
 #include <linux/perf_event.h>
 #include "types.h"
 #include "xyarray.h"

+struct perf_counts_values {
+	union {
+		struct {
+			u64 val;
+			u64 ena;
+			u64 run;
+		};
+		u64 values[3];
+	};
+};
+
+struct perf_counts {
+	s8			  scaled;
+	struct perf_counts_values aggr;
+	struct perf_counts_values cpu[];
+};
+
 struct perf_evsel {
 	struct list_head	node;
 	struct perf_event_attr	attr;
 	char			*filter;
 	struct xyarray		*fd;
+	struct perf_counts	*counts;
 	int			idx;
 	void			*priv;
 };
@@ -19,10 +38,70 @@ struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
 void perf_evsel__delete(struct perf_evsel *evsel);

 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
 void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

 #define perf_evsel__match(evsel, t, c)			\
 	(evsel->attr.type == PERF_TYPE_##t &&		\
 	 evsel->attr.config == PERF_COUNT_##c)

+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+			      int cpu, int thread, bool scale);
+
+/**
+ * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+					  int cpu, int thread)
+{
+	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+}
+
+/**
+ * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
+						 int cpu, int thread)
+{
+	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
+		       bool scale);
+
+/**
+ * perf_evsel__read - Read the aggregate results on all CPUs
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read(struct perf_evsel *evsel,
+				   int ncpus, int nthreads)
+{
+	return __perf_evsel__read(evsel, ncpus, nthreads, false);
+}
+
+/**
+ * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
+					  int ncpus, int nthreads)
+{
+	return __perf_evsel__read(evsel, ncpus, nthreads, true);
+}
+
 #endif /* __PERF_EVSEL_H */
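The commit message hopes these routines are generic enough for 'perf test'. A rough sketch of what such a caller inside the perf tree could look like; this is not part of the patch, it assumes perf_evsel__new() fills attr.type/attr.config (as in the earlier evsel patch), borrows the local FD() accessor that builtin-stat.c and evsel.c define for themselves, and still opens the counter by hand since this patch does not move that step:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include "perf.h"               /* sys_perf_event_open() */
#include "util/evsel.h"
#include "util/xyarray.h"

/* same accessor that builtin-stat.c and evsel.c define locally */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static int count_task_clock(void)
{
        struct perf_evsel *evsel;
        int err = -1;

        evsel = perf_evsel__new(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK, 0);
        if (evsel == NULL)
                return -1;

        /* one CPU slot, one thread slot */
        if (perf_evsel__alloc_fd(evsel, 1, 1) < 0 ||
            perf_evsel__alloc_counts(evsel, 1) < 0)
                goto out;

        /* opening is still the caller's job at this point */
        FD(evsel, 0, 0) = sys_perf_event_open(&evsel->attr, 0, -1, -1, 0);
        if (FD(evsel, 0, 0) < 0)
                goto out;

        /* ... run the code under test here ... */

        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0)
                goto out_close;

        printf("task-clock: %" PRIu64 " ns\n",
               (uint64_t)evsel->counts->cpu[0].val);
        err = 0;
out_close:
        perf_evsel__close_fd(evsel, 1, 1);
out:
        perf_evsel__delete(evsel);      /* frees the fd array, not ->counts */
        return err;
}
```

Note that perf_evsel__delete() only releases the fd array, so a real user would also have to free evsel->counts itself; no free_counts helper is added by this patch.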