Commit 2bdf4d7e authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf parse-events: Wildcard legacy cache events

It is inconsistent that "perf stat -e instructions-retired" wildcard
opens on all PMUs while legacy cache events like "perf stat -e
L1-dcache-load-miss" do not. A behavior introduced by hybrid is that a
legacy cache event like L1-dcache-load-miss should wildcard open on
all hybrid PMUs. Previously hybrid would call to is_event_supported
for each PMU, a failure of which results in the event not being
added. This isn't done in this case as the parser should just create
perf_event_attr and the later open should fail, or the counter give
"<not counted>". If this wants to be avoided then the PMU can be named
with the event.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahmad Yasin <ahmad.yasin@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Edward Baker <edward.baker@intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Samantha Alt <samantha.alt@intel.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Cc: Weilin Wang <weilin.wang@intel.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Yang Jihong <yangjihong1@huawei.com>
Link: https://lore.kernel.org/r/20230502223851.2234828-25-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 6fd1e519
...@@ -179,36 +179,3 @@ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state, ...@@ -179,36 +179,3 @@ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
return add_raw_hybrid(parse_state, list, attr, name, metric_id, return add_raw_hybrid(parse_state, list, attr, name, metric_id,
config_terms); config_terms);
} }
/*
 * Add a legacy cache (PERF_TYPE_HW_CACHE) event once per hybrid PMU.
 *
 * NOTE(review): this function is being deleted by this commit; the
 * replacement logic wildcard-opens legacy cache events across all PMUs in
 * parse_events_add_cache() instead.
 *
 * On return, *hybrid tells the caller whether the hybrid path handled the
 * event (false on non-hybrid systems, where the caller must add the event
 * itself).  Returns 0 on success or the first non-zero error from
 * create_event_hybrid().
 */
int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
struct perf_event_attr *attr,
const char *name,
const char *metric_id,
struct list_head *config_terms,
bool *hybrid,
struct parse_events_state *parse_state)
{
struct perf_pmu *pmu;
int ret;
/* Not a hybrid system: report unhandled and let the caller proceed. */
*hybrid = false;
if (!perf_pmu__has_hybrid())
return 0;
*hybrid = true;
perf_pmu__for_each_hybrid_pmu(pmu) {
LIST_HEAD(terms);
/* Skip PMUs filtered out by the parse state (presumably a
 * user-requested PMU name — TODO confirm pmu_cmp() semantics). */
if (pmu_cmp(parse_state, pmu))
continue;
/* Each PMU gets its own copy of the config terms, freed below,
 * so one PMU's event cannot mutate another's terms. */
copy_config_terms(&terms, config_terms);
ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
attr, name, metric_id, &terms, pmu);
free_config_terms(&terms);
if (ret)
return ret;
}
return 0;
}
...@@ -15,11 +15,4 @@ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state, ...@@ -15,11 +15,4 @@ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
struct list_head *config_terms, struct list_head *config_terms,
bool *hybrid); bool *hybrid);
int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
struct perf_event_attr *attr,
const char *name, const char *metric_id,
struct list_head *config_terms,
bool *hybrid,
struct parse_events_state *parse_state);
#endif /* __PERF_PARSE_EVENTS_HYBRID_H */ #endif /* __PERF_PARSE_EVENTS_HYBRID_H */
...@@ -472,22 +472,31 @@ static int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u ...@@ -472,22 +472,31 @@ static int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u
int parse_events_add_cache(struct list_head *list, int *idx, const char *name, int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_error *err, struct parse_events_error *err,
struct list_head *head_config, struct list_head *head_config)
struct parse_events_state *parse_state)
{ {
struct perf_event_attr attr; struct perf_pmu *pmu = NULL;
bool found_supported = false;
const char *config_name = get_config_name(head_config);
const char *metric_id = get_config_metric_id(head_config);
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
LIST_HEAD(config_terms); LIST_HEAD(config_terms);
const char *config_name, *metric_id; struct perf_event_attr attr;
int ret; int ret;
bool hybrid;
/* Skip unsupported PMUs. */
if (!perf_pmu__supports_legacy_cache(pmu))
continue;
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_HW_CACHE; attr.type = PERF_TYPE_HW_CACHE;
ret = parse_events__decode_legacy_cache(name, /*pmu_type=*/0, &attr.config);
ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
if (ret) if (ret)
return ret; return ret;
found_supported = true;
if (head_config) { if (head_config) {
if (config_attr(&attr, head_config, err, if (config_attr(&attr, head_config, err,
config_term_common)) config_term_common))
...@@ -497,21 +506,14 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name, ...@@ -497,21 +506,14 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
return -ENOMEM; return -ENOMEM;
} }
config_name = get_config_name(head_config); if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
metric_id = get_config_metric_id(head_config); metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
ret = parse_events__add_cache_hybrid(list, idx, &attr, /*cpu_list=*/NULL) == NULL)
config_name ? : name, return -ENOMEM;
metric_id,
&config_terms,
&hybrid, parse_state);
if (hybrid)
goto out_free_terms;
ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
&config_terms);
out_free_terms:
free_config_terms(&config_terms); free_config_terms(&config_terms);
return ret; }
return found_supported ? 0 : -EINVAL;
} }
#ifdef HAVE_LIBTRACEEVENT #ifdef HAVE_LIBTRACEEVENT
......
...@@ -172,8 +172,7 @@ int parse_events_add_tool(struct parse_events_state *parse_state, ...@@ -172,8 +172,7 @@ int parse_events_add_tool(struct parse_events_state *parse_state,
int tool_event); int tool_event);
int parse_events_add_cache(struct list_head *list, int *idx, const char *name, int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_error *error, struct parse_events_error *error,
struct list_head *head_config, struct list_head *head_config);
struct parse_events_state *parse_state);
int parse_events_add_breakpoint(struct list_head *list, int *idx, int parse_events_add_breakpoint(struct list_head *list, int *idx,
u64 addr, char *type, u64 len); u64 addr, char *type, u64 len);
int parse_events_add_pmu(struct parse_events_state *parse_state, int parse_events_add_pmu(struct parse_events_state *parse_state,
......
...@@ -476,7 +476,7 @@ PE_LEGACY_CACHE opt_event_config ...@@ -476,7 +476,7 @@ PE_LEGACY_CACHE opt_event_config
list = alloc_list(); list = alloc_list();
ABORT_ON(!list); ABORT_ON(!list);
err = parse_events_add_cache(list, &parse_state->idx, $1, error, $2, parse_state); err = parse_events_add_cache(list, &parse_state->idx, $1, error, $2);
parse_events_terms__delete($2); parse_events_terms__delete($2);
free($1); free($1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment