Commit a30450e6 authored by Kan Liang, committed by Namhyung Kim

perf mem: Clean up perf_mem_events__ptr()

The mem_events can now be retrieved from struct perf_pmu, so the
arch-specific perf_mem_events__ptr() implementations are no longer
required. Remove all of them.

Intel hybrid platforms have multiple PMUs that support mem_events, but
they all share the same mem_events array. Other architectures have only
one mem-events-capable PMU, so it is sufficient to configure the
mem_events for a single PMU. Add perf_mem_events_find_pmu(), which
returns the first PMU that supports mem_events.
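
As an illustration only (the snippet below is stitched together from the
builtin-mem.c and builtin-c2c.c hunks in this patch, not a new API), a tool
now picks the PMU once and passes it to every mem_events helper:

        struct perf_mem_event *e;
        struct perf_pmu *pmu;

        pmu = perf_mem_events_find_pmu();       /* first PMU that supports mem_events */
        if (!pmu)
                return -1;                      /* no PMU supports the memory events */

        if (perf_pmu__mem_events_init(pmu))     /* checks sysfs for this PMU only */
                return -1;                      /* memory events not supported */

        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);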

In perf_mem_events__init(), perf_pmus__scan() is no longer required,
which avoids checking sysfs for every PMU on the system.

Make perf_mem_events__record_args() more generic and remove
perf_mem_events__print_unsupport_hybrid().

Since a pmu parameter is added, rename perf_mem_events__ptr() to
perf_pmu__mem_events_ptr(). Several other functions are renamed along
the same lines.
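
For reference, the renames introduced by this patch (taken from the
util/mem-events.h hunk below) are:

        perf_mem_events__parse(str)  ->  perf_pmu__mem_events_parse(pmu, str)
        perf_mem_events__init()      ->  perf_pmu__mem_events_init(pmu)
        perf_mem_events__ptr(i)      ->  perf_pmu__mem_events_ptr(pmu, i)
        perf_mem_events__list()      ->  perf_pmu__mem_events_list(pmu)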
Reviewed-by: Ian Rogers <irogers@google.com>
Reviewed-by: Kajol Jain <kjain@linux.ibm.com>
Tested-by: Ravi Bangoria <ravi.bangoria@amd.com>
Tested-by: Kajol Jain <kjain@linux.ibm.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: james.clark@arm.com
Cc: will@kernel.org
Cc: leo.yan@linaro.org
Cc: mike.leach@linaro.org
Cc: renyu.zj@linux.alibaba.com
Cc: yuhaixin.yhx@linux.alibaba.com
Cc: tmricht@linux.ibm.com
Cc: atrajeev@linux.vnet.ibm.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: john.g.garry@oracle.com
Link: https://lore.kernel.org/r/20240123185036.3461837-3-kan.liang@linux.intel.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
parent bb65acdc
--- a/tools/perf/arch/arm64/util/mem-events.c
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -13,17 +13,9 @@ struct perf_mem_event perf_mem_events_arm[PERF_MEM_EVENTS__MAX] = {
 
 static char mem_ev_name[100];
 
-struct perf_mem_event *perf_mem_events__ptr(int i)
-{
-        if (i >= PERF_MEM_EVENTS__MAX)
-                return NULL;
-
-        return &perf_mem_events_arm[i];
-}
-
 const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
 {
-        struct perf_mem_event *e = perf_mem_events__ptr(i);
+        struct perf_mem_event *e = &perf_mem_events_arm[i];
 
         if (i >= PERF_MEM_EVENTS__MAX)
                 return NULL;
--- a/tools/perf/arch/x86/util/mem-events.c
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -28,17 +28,6 @@ struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
         E("mem-ldst", "ibs_op//", "ibs_op"),
 };
 
-struct perf_mem_event *perf_mem_events__ptr(int i)
-{
-        if (i >= PERF_MEM_EVENTS__MAX)
-                return NULL;
-
-        if (x86__is_amd_cpu())
-                return &perf_mem_events_amd[i];
-
-        return &perf_mem_events_intel[i];
-}
-
 bool is_mem_loads_aux_event(struct evsel *leader)
 {
         struct perf_pmu *pmu = perf_pmus__find("cpu");
@@ -54,7 +43,12 @@ bool is_mem_loads_aux_event(struct evsel *leader)
 
 const char *perf_mem_events__name(int i, const char *pmu_name)
 {
-        struct perf_mem_event *e = perf_mem_events__ptr(i);
+        struct perf_mem_event *e;
+
+        if (x86__is_amd_cpu())
+                e = &perf_mem_events_amd[i];
+        else
+                e = &perf_mem_events_intel[i];
 
         if (!e)
                 return NULL;
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -3215,12 +3215,19 @@ static int parse_record_events(const struct option *opt,
                                const char *str, int unset __maybe_unused)
 {
         bool *event_set = (bool *) opt->value;
+        struct perf_pmu *pmu;
 
+        pmu = perf_mem_events_find_pmu();
+        if (!pmu) {
+                pr_err("failed: there is no PMU that supports perf c2c\n");
+                exit(-1);
+        }
+
         if (!strcmp(str, "list")) {
-                perf_mem_events__list();
+                perf_pmu__mem_events_list(pmu);
                 exit(0);
         }
-        if (perf_mem_events__parse(str))
+        if (perf_pmu__mem_events_parse(pmu, str))
                 exit(-1);
 
         *event_set = true;
@@ -3245,6 +3252,7 @@ static int perf_c2c__record(int argc, const char **argv)
         bool all_user = false, all_kernel = false;
         bool event_set = false;
         struct perf_mem_event *e;
+        struct perf_pmu *pmu;
         struct option options[] = {
         OPT_CALLBACK('e', "event", &event_set, "event",
                      "event selector. Use 'perf c2c record -e list' to list available events",
@@ -3256,7 +3264,13 @@ static int perf_c2c__record(int argc, const char **argv)
         OPT_END()
         };
 
-        if (perf_mem_events__init()) {
+        pmu = perf_mem_events_find_pmu();
+        if (!pmu) {
+                pr_err("failed: no PMU supports the memory events\n");
+                return -1;
+        }
+
+        if (perf_pmu__mem_events_init(pmu)) {
                 pr_err("failed: memory events not supported\n");
                 return -1;
         }
@@ -3280,7 +3294,7 @@ static int perf_c2c__record(int argc, const char **argv)
         rec_argv[i++] = "record";
 
         if (!event_set) {
-                e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
+                e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
                 /*
                  * The load and store operations are required, use the event
                  * PERF_MEM_EVENTS__LOAD_STORE if it is supported.
@@ -3289,15 +3303,15 @@ static int perf_c2c__record(int argc, const char **argv)
                         e->record = true;
                         rec_argv[i++] = "-W";
                 } else {
-                        e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+                        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
                         e->record = true;
 
-                        e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
+                        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
                         e->record = true;
                 }
         }
 
-        e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
         if (e->record)
                 rec_argv[i++] = "-W";
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -43,12 +43,19 @@ static int parse_record_events(const struct option *opt,
                                const char *str, int unset __maybe_unused)
 {
         struct perf_mem *mem = *(struct perf_mem **)opt->value;
+        struct perf_pmu *pmu;
 
+        pmu = perf_mem_events_find_pmu();
+        if (!pmu) {
+                pr_err("failed: there is no PMU that supports perf mem\n");
+                exit(-1);
+        }
+
         if (!strcmp(str, "list")) {
-                perf_mem_events__list();
+                perf_pmu__mem_events_list(pmu);
                 exit(0);
         }
-        if (perf_mem_events__parse(str))
+        if (perf_pmu__mem_events_parse(pmu, str))
                 exit(-1);
 
         mem->operation = 0;
@@ -72,6 +79,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
         int ret;
         bool all_user = false, all_kernel = false;
         struct perf_mem_event *e;
+        struct perf_pmu *pmu;
         struct option options[] = {
         OPT_CALLBACK('e', "event", &mem, "event",
                      "event selector. use 'perf mem record -e list' to list available events",
@@ -84,7 +92,13 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
         OPT_END()
         };
 
-        if (perf_mem_events__init()) {
+        pmu = perf_mem_events_find_pmu();
+        if (!pmu) {
+                pr_err("failed: no PMU supports the memory events\n");
+                return -1;
+        }
+
+        if (perf_pmu__mem_events_init(pmu)) {
                 pr_err("failed: memory events not supported\n");
                 return -1;
         }
@@ -113,7 +127,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 
         rec_argv[i++] = "record";
 
-        e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
+        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
 
         /*
          * The load and store operations are required, use the event
@@ -126,17 +140,17 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
                 rec_argv[i++] = "-W";
         } else {
                 if (mem->operation & MEM_OPERATION_LOAD) {
-                        e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+                        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
                         e->record = true;
                 }
 
                 if (mem->operation & MEM_OPERATION_STORE) {
-                        e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
+                        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
                         e->record = true;
                 }
         }
 
-        e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+        e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
         if (e->record)
                 rec_argv[i++] = "-W";
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -29,17 +29,42 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
 static char mem_loads_name[100];
 static bool mem_loads_name__init;
 
-struct perf_mem_event * __weak perf_mem_events__ptr(int i)
+struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
 {
-        if (i >= PERF_MEM_EVENTS__MAX)
+        if (i >= PERF_MEM_EVENTS__MAX || !pmu)
                 return NULL;
 
-        return &perf_mem_events[i];
+        return &pmu->mem_events[i];
+}
+
+static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
+{
+        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+                if (pmu->mem_events)
+                        return pmu;
+        }
+        return NULL;
+}
+
+struct perf_pmu *perf_mem_events_find_pmu(void)
+{
+        /*
+         * The current perf mem doesn't support per-PMU configuration.
+         * The exact same configuration is applied to all the
+         * mem_events supported PMUs.
+         * Return the first mem_events supported PMU.
+         *
+         * Notes: The only case which may support multiple mem_events
+         * supported PMUs is Intel hybrid. The exact same mem_events
+         * is shared among the PMUs. Only configure the first PMU
+         * is good enough as well.
+         */
+        return perf_pmus__scan_mem(NULL);
 }
 
 const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
 {
-        struct perf_mem_event *e = perf_mem_events__ptr(i);
+        struct perf_mem_event *e = &perf_mem_events[i];
 
         if (!e)
                 return NULL;
@@ -61,7 +86,7 @@ __weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
         return false;
 }
 
-int perf_mem_events__parse(const char *str)
+int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
 {
         char *tok, *saveptr = NULL;
         bool found = false;
@@ -79,7 +104,7 @@ int perf_mem_events__parse(const char *str)
 
         while (tok) {
                 for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
-                        struct perf_mem_event *e = perf_mem_events__ptr(j);
+                        struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
 
                         if (!e->tag)
                                 continue;
@@ -112,7 +137,7 @@ static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
         return !stat(path, &st);
 }
 
-int perf_mem_events__init(void)
+int perf_pmu__mem_events_init(struct perf_pmu *pmu)
 {
         const char *mnt = sysfs__mount();
         bool found = false;
@@ -122,8 +147,7 @@ int perf_mem_events__init(void)
                 return -ENOENT;
 
         for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
-                struct perf_mem_event *e = perf_mem_events__ptr(j);
-                struct perf_pmu *pmu = NULL;
+                struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
 
                 /*
                  * If the event entry isn't valid, skip initialization
@@ -132,29 +156,20 @@ int perf_mem_events__init(void)
                 if (!e->tag)
                         continue;
 
-                /*
-                 * Scan all PMUs not just core ones, since perf mem/c2c on
-                 * platforms like AMD uses IBS OP PMU which is independent
-                 * of core PMU.
-                 */
-                while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                        e->supported |= perf_mem_event__supported(mnt, pmu, e);
-                        if (e->supported) {
-                                found = true;
-                                break;
-                        }
-                }
+                e->supported |= perf_mem_event__supported(mnt, pmu, e);
+                if (e->supported)
+                        found = true;
         }
 
         return found ? 0 : -ENOENT;
 }
 
-void perf_mem_events__list(void)
+void perf_pmu__mem_events_list(struct perf_pmu *pmu)
 {
         int j;
 
         for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
-                struct perf_mem_event *e = perf_mem_events__ptr(j);
+                struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
 
                 fprintf(stderr, "%-*s%-*s%s",
                         e->tag ? 13 : 0,
@@ -165,50 +180,32 @@ void perf_mem_events__list(void)
         }
 }
 
-static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
-                                                     int idx)
-{
-        const char *mnt = sysfs__mount();
-        struct perf_pmu *pmu = NULL;
-
-        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                if (!perf_mem_event__supported(mnt, pmu, e)) {
-                        pr_err("failed: event '%s' not supported\n",
-                               perf_mem_events__name(idx, pmu->name));
-                }
-        }
-}
-
 int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
                                  char **rec_tmp, int *tmp_nr)
 {
         const char *mnt = sysfs__mount();
+        struct perf_pmu *pmu = NULL;
         int i = *argv_nr, k = 0;
         struct perf_mem_event *e;
 
-        for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
-                e = perf_mem_events__ptr(j);
+        while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
+                for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+                        e = perf_pmu__mem_events_ptr(pmu, j);
 
-                if (!e->record)
-                        continue;
+                        if (!e->record)
+                                continue;
 
-                if (perf_pmus__num_mem_pmus() == 1) {
-                        if (!e->supported) {
-                                pr_err("failed: event '%s' not supported\n",
-                                       perf_mem_events__name(j, NULL));
-                                return -1;
-                        }
+                        if (!e->supported) {
+                                pr_err("failed: event '%s' not supported\n",
+                                       perf_mem_events__name(j, pmu->name));
+                                return -1;
+                        }
 
-                        rec_argv[i++] = "-e";
-                        rec_argv[i++] = perf_mem_events__name(j, NULL);
-                } else {
-                        struct perf_pmu *pmu = NULL;
-
-                        if (!e->supported) {
-                                perf_mem_events__print_unsupport_hybrid(e, j);
-                                return -1;
-                        }
-
-                        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+                        if (perf_pmus__num_mem_pmus() == 1) {
+                                rec_argv[i++] = "-e";
+                                rec_argv[i++] = perf_mem_events__name(j, NULL);
+                        } else {
                                 const char *s = perf_mem_events__name(j, pmu->name);
 
                                 if (!perf_mem_event__supported(mnt, pmu, e))
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -36,14 +36,15 @@ enum {
 extern unsigned int perf_mem_events__loads_ldlat;
 extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
 
-int perf_mem_events__parse(const char *str);
-int perf_mem_events__init(void);
+int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str);
+int perf_pmu__mem_events_init(struct perf_pmu *pmu);
 const char *perf_mem_events__name(int i, const char *pmu_name);
-struct perf_mem_event *perf_mem_events__ptr(int i);
+struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i);
+struct perf_pmu *perf_mem_events_find_pmu(void);
 bool is_mem_loads_aux_event(struct evsel *leader);
-void perf_mem_events__list(void);
+void perf_pmu__mem_events_list(struct perf_pmu *pmu);
 int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
                                  char **rec_tmp, int *tmp_nr);