Commit 71c86cda authored by Zhengjun Xing's avatar Zhengjun Xing Committed by Arnaldo Carvalho de Melo

perf parse-events: Remove "not supported" hybrid cache events

By default, we create two hybrid cache events, one is for cpu_core, and
another is for cpu_atom. But some hybrid hardware cache events are only
available on one CPU PMU. For example, the 'L1-dcache-load-misses' is only
available on cpu_core, while the 'L1-icache-loads' is only available on
cpu_atom. We need to remove "not supported" hybrid cache events. By
extending is_event_supported() into a global API and using it to check if the
hybrid cache events are supported before being created, we can remove the
"not supported" hybrid cache events.

Before:

 # ./perf stat -e L1-dcache-load-misses,L1-icache-loads -a sleep 1

 Performance counter stats for 'system wide':

            52,570      cpu_core/L1-dcache-load-misses/
   <not supported>      cpu_atom/L1-dcache-load-misses/
   <not supported>      cpu_core/L1-icache-loads/
         1,471,817      cpu_atom/L1-icache-loads/

       1.004915229 seconds time elapsed

After:

 # ./perf stat -e L1-dcache-load-misses,L1-icache-loads -a sleep 1

 Performance counter stats for 'system wide':

            54,510      cpu_core/L1-dcache-load-misses/
         1,441,286      cpu_atom/L1-icache-loads/

       1.005114281 seconds time elapsed

Fixes: 30def61f ("perf parse-events: Create two hybrid cache events")
Reported-by: Yi Ammy <ammy.yi@intel.com>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220923030013.3726410-2-zhengjun.xing@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent e28c0787
...@@ -33,7 +33,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr, ...@@ -33,7 +33,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
* If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
*/ */
attr->type = type; attr->type = type;
attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT); attr->config = (attr->config & PERF_HW_EVENT_MASK) |
((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
} }
static int create_event_hybrid(__u32 config_type, int *idx, static int create_event_hybrid(__u32 config_type, int *idx,
...@@ -48,13 +49,25 @@ static int create_event_hybrid(__u32 config_type, int *idx, ...@@ -48,13 +49,25 @@ static int create_event_hybrid(__u32 config_type, int *idx,
__u64 config = attr->config; __u64 config = attr->config;
config_hybrid_attr(attr, config_type, pmu->type); config_hybrid_attr(attr, config_type, pmu->type);
/*
* Some hybrid hardware cache events are only available on one CPU
* PMU. For example, the 'L1-dcache-load-misses' is only available
* on cpu_core, while the 'L1-icache-loads' is only available on
* cpu_atom. We need to remove "not supported" hybrid cache events.
*/
if (attr->type == PERF_TYPE_HW_CACHE
&& !is_event_supported(attr->type, attr->config))
return 0;
evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id, evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms); pmu, config_terms);
if (evsel) if (evsel) {
evsel->pmu_name = strdup(pmu->name); evsel->pmu_name = strdup(pmu->name);
else if (!evsel->pmu_name)
return -ENOMEM;
} else
return -ENOMEM; return -ENOMEM;
attr->type = type; attr->type = type;
attr->config = config; attr->config = config;
return 0; return 0;
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include "util/parse-events-hybrid.h" #include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h" #include "util/pmu-hybrid.h"
#include "tracepoint.h" #include "tracepoint.h"
#include "thread_map.h"
#define MAX_NAME_LEN 100 #define MAX_NAME_LEN 100
...@@ -157,6 +158,44 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { ...@@ -157,6 +158,44 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
bool is_event_supported(u8 type, u64 config)
{
bool ret = true;
int open_return;
struct evsel *evsel;
struct perf_event_attr attr = {
.type = type,
.config = config,
.disabled = 1,
};
struct perf_thread_map *tmap = thread_map__new_by_tid(0);
if (tmap == NULL)
return false;
evsel = evsel__new(&attr);
if (evsel) {
open_return = evsel__open(evsel, NULL, tmap);
ret = open_return >= 0;
if (open_return == -EACCES) {
/*
* This happens if the paranoid value
* /proc/sys/kernel/perf_event_paranoid is set to 2
* Re-run with exclude_kernel set; we don't do that
* by default as some ARM machines do not support it.
*
*/
evsel->core.attr.exclude_kernel = 1;
ret = evsel__open(evsel, NULL, tmap) >= 0;
}
evsel__delete(evsel);
}
perf_thread_map__put(tmap);
return ret;
}
const char *event_type(int type) const char *event_type(int type)
{ {
switch (type) { switch (type) {
......
...@@ -19,6 +19,7 @@ struct option; ...@@ -19,6 +19,7 @@ struct option;
struct perf_pmu; struct perf_pmu;
bool have_tracepoints(struct list_head *evlist); bool have_tracepoints(struct list_head *evlist);
bool is_event_supported(u8 type, u64 config);
const char *event_type(int type); const char *event_type(int type);
......
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
#include "probe-file.h" #include "probe-file.h"
#include "string2.h" #include "string2.h"
#include "strlist.h" #include "strlist.h"
#include "thread_map.h"
#include "tracepoint.h" #include "tracepoint.h"
#include "pfm.h" #include "pfm.h"
#include "pmu-hybrid.h" #include "pmu-hybrid.h"
...@@ -239,44 +238,6 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob, ...@@ -239,44 +238,6 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
strlist__delete(sdtlist); strlist__delete(sdtlist);
} }
static bool is_event_supported(u8 type, u64 config)
{
bool ret = true;
int open_return;
struct evsel *evsel;
struct perf_event_attr attr = {
.type = type,
.config = config,
.disabled = 1,
};
struct perf_thread_map *tmap = thread_map__new_by_tid(0);
if (tmap == NULL)
return false;
evsel = evsel__new(&attr);
if (evsel) {
open_return = evsel__open(evsel, NULL, tmap);
ret = open_return >= 0;
if (open_return == -EACCES) {
/*
* This happens if the paranoid value
* /proc/sys/kernel/perf_event_paranoid is set to 2
* Re-run with exclude_kernel set; we don't do that
* by default as some ARM machines do not support it.
*
*/
evsel->core.attr.exclude_kernel = 1;
ret = evsel__open(evsel, NULL, tmap) >= 0;
}
evsel__delete(evsel);
}
perf_thread_map__put(tmap);
return ret;
}
int print_hwcache_events(const char *event_glob, bool name_only) int print_hwcache_events(const char *event_glob, bool name_only)
{ {
unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0; unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment