Commit 40c84321 authored by Linus Torvalds


Merge tag 'perf-tools-for-v5.17-2022-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull more perf tools updates from Arnaldo Carvalho de Melo:

 - Fix printing 'phys_addr' in 'perf script'.

 - Fix failure to add events with 'perf probe' in ppc64 due to not
   removing leading dot (ppc64 ABIv1).

 - Fix the build of the cpu_map__item() python binding.

 - Support event aliases in the form foo-bar-baz, add pmu-events and
   parse-events tests for it (sketch below).

 - No need to set up affinities when starting a workload or attaching
   to a pid (sketch below).

 - Use path__join() to compose a path instead of an ad-hoc snprintf()
   equivalent (sketch below).

 - Override attr->sample_period for non-libpfm4 events.

 - Use libperf cpumap APIs instead of accessing the internal state
   directly (sketch below).

 - Sync x86 arch prctl headers and files changed by the new
   set_mempolicy_home_node syscall with the kernel sources.

 - Remove duplicate include in cpumap.h.

 - Remove redundant err variable.
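
 A minimal standalone sketch of the foo-bar-baz token split (the event
 name here is made up; the in-tree code does the same strchr() walk in
 perf_pmu__parse_init(), classifying the pieces as PREFIX, SUFFIX and
 the new SUFFIX2 before the grammar reassembles them as "%s-%s-%s"):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *name = "event-two-hyph";
        char *tmp = strchr(name, '-');
        char *tmp2 = tmp ? strchr(tmp + 1, '-') : NULL;

        if (tmp2)        /* foo-bar-baz: prefix, suffix, suffix2 */
            printf("prefix=%.*s suffix=%.*s suffix2=%s\n",
                   (int)(tmp - name), name,
                   (int)(tmp2 - tmp - 1), tmp + 1, tmp2 + 1);
        else if (tmp)    /* foo-bar: prefix, suffix */
            printf("prefix=%.*s suffix=%s\n",
                   (int)(tmp - name), name, tmp + 1);
        else
            printf("plain=%s\n", name);
        return 0;
    }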
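
 The affinity change follows one pattern in builtin-stat.c and
 util/evlist.c: skip affinity__setup() when the evlist carries the
 dummy CPU map of a pid target, and pass NULL to the iterator and to
 affinity__cleanup(), which now tolerates it. A condensed sketch with
 stub types (the stubs are hypothetical; the real types live in
 tools/perf/util):

    #include <stdbool.h>
    #include <stdio.h>

    struct affinity { int unused; };                /* stub */

    static bool cpu_map__is_dummy(bool pid_target)  /* stub */
    {
        return pid_target;
    }

    static int affinity__setup(struct affinity *a)  /* stub */
    {
        (void)a;
        return 0;
    }

    static void affinity__cleanup(struct affinity *a)
    {
        if (a != NULL)   /* the series makes cleanup NULL-tolerant */
            puts("cleaned up");
    }

    int main(void)
    {
        struct affinity saved_affinity, *affinity = NULL;
        bool pid_target = true;  /* attaching to a pid: dummy CPU map */

        if (!cpu_map__is_dummy(pid_target)) {
            if (affinity__setup(&saved_affinity) < 0)
                return 1;
            affinity = &saved_affinity;
        }
        /* ... open/enable events, iterating with affinity == NULL ... */
        affinity__cleanup(affinity);  /* safe no-op for pid targets */
        return 0;
    }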
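
 path__join() is a small tools/perf helper; a hypothetical stand-in
 showing the intent (join two components with a single '/'; the exact
 in-tree implementation may differ):

    #include <stdio.h>

    /* hypothetical stand-in for tools/perf's path__join() */
    static int path_join(char *bf, size_t size,
                         const char *p1, const char *p2)
    {
        return snprintf(bf, size, "%s%s%s", p1, p1[0] ? "/" : "", p2);
    }

    int main(void)
    {
        char path[4096];

        /* replaces snprintf(path, sizeof(path), "%s/%s", dir, name) */
        path_join(path, sizeof(path), "/lib/modules/5.17.0", "kernel");
        printf("%s\n", path);   /* /lib/modules/5.17.0/kernel */
        return 0;
    }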
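
 The libperf accessors used throughout the diff, perf_cpu_map__nr(),
 perf_cpu_map__cpu() and perf_cpu_map__for_each_cpu(), replace direct
 pokes at map->nr and map->map[]. A small standalone user, built
 against libperf (the "0-3" CPU list is arbitrary):

    #include <perf/cpumap.h>
    #include <stdio.h>

    int main(void)
    {
        /* "0-3" parses to CPUs 0,1,2,3; NULL would mean all online CPUs */
        struct perf_cpu_map *map = perf_cpu_map__new("0-3");
        struct perf_cpu cpu;
        int idx;

        if (!map)
            return 1;

        printf("nr cpus: %d\n", perf_cpu_map__nr(map));  /* not map->nr */

        perf_cpu_map__for_each_cpu(cpu, idx, map)        /* not map->map[] */
            printf("idx %d -> cpu %d\n", idx, cpu.cpu);

        perf_cpu_map__put(map);  /* drop the reference */
        return 0;
    }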

* tag 'perf-tools-for-v5.17-2022-01-22' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf tools: Remove redundant err variable
  perf test: Add parse-events test for aliases with hyphens
  perf test: Add pmu-events test for aliases with hyphens
  perf parse-events: Support event alias in form foo-bar-baz
  perf evsel: Override attr->sample_period for non-libpfm4 events
  perf cpumap: Remove duplicate include in cpumap.h
  perf cpumap: Migrate to libperf cpumap api
  perf python: Fix cpu_map__item() building
  perf script: Fix printing 'phys_addr' failure issue
  tools headers UAPI: Sync files changed by new set_mempolicy_home_node syscall
  tools headers UAPI: Sync x86 arch prctl headers with the kernel sources
  perf machine: Use path__join() to compose a path instead of snprintf(dir, '/', filename)
  perf evlist: No need to setup affinities when disabling events for pid targets
  perf evlist: No need to setup affinities when enabling events for pid targets
  perf stat: No need to setup affinities when starting a workload
  perf affinity: Allow passing a NULL arg to affinity__cleanup()
  perf probe: Fix ppc64 'perf probe add events failed' case
parents 67bfce0e f0ac5b85
@@ -13,6 +13,8 @@
 #define ARCH_GET_XCOMP_SUPP     0x1021
 #define ARCH_GET_XCOMP_PERM     0x1022
 #define ARCH_REQ_XCOMP_PERM     0x1023
+#define ARCH_GET_XCOMP_GUEST_PERM   0x1024
+#define ARCH_REQ_XCOMP_GUEST_PERM   0x1025
 
 #define ARCH_MAP_VDSO_X32       0x2001
 #define ARCH_MAP_VDSO_32        0x2002
...
@@ -883,8 +883,11 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease)
 #define __NR_futex_waitv 449
 __SYSCALL(__NR_futex_waitv, sys_futex_waitv)
 
+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+
 #undef __NR_syscalls
-#define __NR_syscalls 450
+#define __NR_syscalls 451
 
 /*
  * 32 bit systems traditionally used different
...
@@ -141,7 +141,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
     }
 
     if (evsel->fd == NULL &&
-        perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+        perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
         return -ENOMEM;
 
     perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
@@ -384,7 +384,7 @@ int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
 {
     int err = 0, i;
 
-    for (i = 0; i < evsel->cpus->nr && !err; i++)
+    for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
         err = perf_evsel__run_ioctl(evsel,
                                     PERF_EVENT_IOC_SET_FILTER,
                                     (void *)filter, i);
...
@@ -364,3 +364,4 @@
 # 447 reserved for memfd_secret
 448    n64     process_mrelease        sys_process_mrelease
 449    n64     futex_waitv             sys_futex_waitv
+450    common  set_mempolicy_home_node sys_set_mempolicy_home_node
@@ -529,3 +529,4 @@
 # 447 reserved for memfd_secret
 448    common  process_mrelease        sys_process_mrelease
 449    common  futex_waitv             sys_futex_waitv
+450    nospu   set_mempolicy_home_node sys_set_mempolicy_home_node
@@ -452,3 +452,4 @@
 # 447 reserved for memfd_secret
 448    common  process_mrelease        sys_process_mrelease    sys_process_mrelease
 449    common  futex_waitv             sys_futex_waitv         sys_futex_waitv
+450    common  set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
@@ -371,6 +371,7 @@
 447    common  memfd_secret            sys_memfd_secret
 448    common  process_mrelease        sys_process_mrelease
 449    common  futex_waitv             sys_futex_waitv
+450    common  set_mempolicy_home_node sys_set_mempolicy_home_node
 #
 # Due to a historical design error, certain syscalls are numbered differently
...
@@ -333,7 +333,7 @@ int bench_epoll_ctl(int argc, const char **argv)
 
     /* default to the number of CPUs */
     if (!nthreads)
-        nthreads = cpu->nr;
+        nthreads = perf_cpu_map__nr(cpu);
 
     worker = calloc(nthreads, sizeof(*worker));
     if (!worker)
...
@@ -452,7 +452,7 @@ int bench_epoll_wait(int argc, const char **argv)
 
     /* default to the number of CPUs and leave one for the writer pthread */
     if (!nthreads)
-        nthreads = cpu->nr - 1;
+        nthreads = perf_cpu_map__nr(cpu) - 1;
 
     worker = calloc(nthreads, sizeof(*worker));
     if (!worker) {
...
@@ -71,7 +71,7 @@ static int evlist__count_evsel_fds(struct evlist *evlist)
     int cnt = 0;
 
     evlist__for_each_entry(evlist, evsel)
-        cnt += evsel->core.threads->nr * evsel->core.cpus->nr;
+        cnt += evsel->core.threads->nr * perf_cpu_map__nr(evsel->core.cpus);
 
     return cnt;
 }
@@ -151,7 +151,7 @@ static int bench_evlist_open_close__run(char *evstr)
 
     init_stats(&time_stats);
 
-    printf("  Number of cpus:\t%d\n", evlist->core.cpus->nr);
+    printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
     printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
     printf("  Number of events:\t%d (%d fds)\n",
            evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
...
@@ -150,7 +150,7 @@ int bench_futex_hash(int argc, const char **argv)
     }
 
     if (!params.nthreads) /* default to the number of CPUs */
-        params.nthreads = cpu->nr;
+        params.nthreads = perf_cpu_map__nr(cpu);
 
     worker = calloc(params.nthreads, sizeof(*worker));
     if (!worker)
...
@@ -173,7 +173,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
     }
 
     if (!params.nthreads)
-        params.nthreads = cpu->nr;
+        params.nthreads = perf_cpu_map__nr(cpu);
 
     worker = calloc(params.nthreads, sizeof(*worker));
     if (!worker)
...
@@ -175,7 +175,7 @@ int bench_futex_requeue(int argc, const char **argv)
     }
 
     if (!params.nthreads)
-        params.nthreads = cpu->nr;
+        params.nthreads = perf_cpu_map__nr(cpu);
 
     worker = calloc(params.nthreads, sizeof(*worker));
     if (!worker)
...
@@ -252,7 +252,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
         err(EXIT_FAILURE, "calloc");
 
     if (!params.nthreads)
-        params.nthreads = cpu->nr;
+        params.nthreads = perf_cpu_map__nr(cpu);
 
     /* some sanity checks */
     if (params.nwakes > params.nthreads ||
...
@@ -151,7 +151,7 @@ int bench_futex_wake(int argc, const char **argv)
     }
 
     if (!params.nthreads)
-        params.nthreads = cpu->nr;
+        params.nthreads = perf_cpu_map__nr(cpu);
 
     worker = calloc(params.nthreads, sizeof(*worker));
     if (!worker)
...
@@ -281,7 +281,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
     int ret;
     int last_cpu;
 
-    last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1).cpu;
+    last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
     mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
     mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
...
@@ -535,12 +535,9 @@ static int perf_event__repipe_exit(struct perf_tool *tool,
 static int perf_event__repipe_tracing_data(struct perf_session *session,
                                            union perf_event *event)
 {
-    int err;
-
     perf_event__repipe_synth(session->tool, event);
-    err = perf_event__process_tracing_data(session, event);
 
-    return err;
+    return perf_event__process_tracing_data(session, event);
 }
 
 static int dso__read_build_id(struct dso *dso)
...
@@ -515,7 +515,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
         return -EINVAL;
 
     if (PRINT_FIELD(PHYS_ADDR) &&
-        evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR))
+        evsel__do_check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR, allow_user_set))
         return -EINVAL;
 
     if (PRINT_FIELD(DATA_PAGE_SIZE) &&
...
@@ -230,11 +230,12 @@ static bool cpus_map_matched(struct evsel *a, struct evsel *b)
     if (!a->core.cpus || !b->core.cpus)
         return false;
 
-    if (a->core.cpus->nr != b->core.cpus->nr)
+    if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
         return false;
 
-    for (int i = 0; i < a->core.cpus->nr; i++) {
-        if (a->core.cpus->map[i].cpu != b->core.cpus->map[i].cpu)
+    for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
+        if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
+            perf_cpu_map__cpu(b->core.cpus, i).cpu)
             return false;
     }
@@ -788,7 +789,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
     const bool forks = (argc > 0);
     bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
     struct evlist_cpu_iterator evlist_cpu_itr;
-    struct affinity affinity;
+    struct affinity saved_affinity, *affinity = NULL;
     int err;
     bool second_pass = false;
@@ -803,8 +804,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
     if (group)
         evlist__set_leader(evsel_list);
 
-    if (affinity__setup(&affinity) < 0)
-        return -1;
+    if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+        if (affinity__setup(&saved_affinity) < 0)
+            return -1;
+        affinity = &saved_affinity;
+    }
 
     evlist__for_each_entry(evsel_list, counter) {
         if (bpf_counter__load(counter, &target))
@@ -813,7 +817,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
             all_counters_use_bpf = false;
     }
 
-    evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+    evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
         counter = evlist_cpu_itr.evsel;
 
         /*
@@ -869,7 +873,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
          */
 
         /* First close errored or weak retry */
-        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
             counter = evlist_cpu_itr.evsel;
 
             if (!counter->reset_group && !counter->errored)
@@ -878,7 +882,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
             perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
         }
 
         /* Now reopen weak */
-        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
             counter = evlist_cpu_itr.evsel;
 
             if (!counter->reset_group && !counter->errored)
@@ -904,7 +908,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
             counter->supported = true;
         }
     }
-    affinity__cleanup(&affinity);
+    affinity__cleanup(affinity);
 
     evlist__for_each_entry(evsel_list, counter) {
         if (!counter->supported) {
...
@@ -18,6 +18,22 @@
       "Invert": "0",
       "EdgeDetect": "0"
   },
+  {
+      "Unit": "CBO",
+      "EventCode": "0xE0",
+      "UMask": "0x00",
+      "EventName": "event-hyphen",
+      "BriefDescription": "UNC_CBO_HYPHEN",
+      "PublicDescription": "UNC_CBO_HYPHEN"
+  },
+  {
+      "Unit": "CBO",
+      "EventCode": "0xC0",
+      "UMask": "0x00",
+      "EventName": "event-two-hyph",
+      "BriefDescription": "UNC_CBO_TWO_HYPH",
+      "PublicDescription": "UNC_CBO_TWO_HYPH"
+  },
   {
       "EventCode": "0x7",
       "EventName": "uncore_hisi_l3c.rd_hit_cpipe",
...
@@ -17,8 +17,8 @@ static unsigned long *get_bitmap(const char *str, int nbits)
     bm = bitmap_zalloc(nbits);
 
     if (map && bm) {
-        for (i = 0; i < map->nr; i++)
-            set_bit(map->map[i].cpu, bm);
+        for (i = 0; i < perf_cpu_map__nr(map); i++)
+            set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
     }
 
     if (map)
...
@@ -75,10 +75,10 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
     TEST_ASSERT_VAL("wrong id", ev->id == 123);
     TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__CPUS);
-    TEST_ASSERT_VAL("wrong cpus", map->nr == 3);
-    TEST_ASSERT_VAL("wrong cpus", map->map[0].cpu == 1);
-    TEST_ASSERT_VAL("wrong cpus", map->map[1].cpu == 2);
-    TEST_ASSERT_VAL("wrong cpus", map->map[2].cpu == 3);
+    TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__nr(map) == 3);
+    TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1);
+    TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2);
+    TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3);
     perf_cpu_map__put(map);
     return 0;
 }
...
@@ -25,14 +25,15 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 {
     struct perf_cpu_map *map = perf_cpu_map__new(str);
     unsigned long *bm = NULL;
-    int i;
 
     bm = bitmap_zalloc(nbits);
 
     if (map && bm) {
-        for (i = 0; i < map->nr; i++) {
-            set_bit(map->map[i].cpu, bm);
-        }
+        struct perf_cpu cpu;
+        int i;
+
+        perf_cpu_map__for_each_cpu(cpu, i, map)
+            set_bit(cpu.cpu, bm);
     }
 
     if (map)
...
@@ -59,11 +59,12 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
     }
 
     CPU_ZERO(&cpu_set);
-    CPU_SET(cpus->map[0].cpu, &cpu_set);
+    CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set);
     sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
     if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
         pr_debug("sched_setaffinity() failed on CPU %d: %s ",
-                 cpus->map[0].cpu, str_error_r(errno, sbuf, sizeof(sbuf)));
+                 perf_cpu_map__cpu(cpus, 0).cpu,
+                 str_error_r(errno, sbuf, sizeof(sbuf)));
         goto out_free_cpus;
     }
...
@@ -2069,6 +2069,31 @@ static int test_event(struct evlist_test *e)
     return ret;
 }
 
+static int test_event_fake_pmu(const char *str)
+{
+    struct parse_events_error err;
+    struct evlist *evlist;
+    int ret;
+
+    evlist = evlist__new();
+    if (!evlist)
+        return -ENOMEM;
+
+    parse_events_error__init(&err);
+    perf_pmu__test_parse_init();
+    ret = __parse_events(evlist, str, &err, &perf_pmu__fake);
+    if (ret) {
+        pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+                 str, ret, err.str);
+        parse_events_error__print(&err, str);
+    }
+
+    parse_events_error__exit(&err);
+    evlist__delete(evlist);
+    return ret;
+}
+
 static int test_events(struct evlist_test *events, unsigned cnt)
 {
     int ret1, ret2 = 0;
@@ -2276,6 +2301,26 @@ static int test_pmu_events_alias(char *event, char *alias)
     return test_event(&e);
 }
 
+static int test_pmu_events_alias2(void)
+{
+    static const char events[][30] = {
+        "event-hyphen",
+        "event-two-hyph",
+    };
+    unsigned long i;
+    int ret = 0;
+
+    for (i = 0; i < ARRAY_SIZE(events); i++) {
+        ret = test_event_fake_pmu(&events[i][0]);
+        if (ret) {
+            pr_err("check_parse_fake %s failed\n", &events[i][0]);
+            break;
+        }
+    }
+
+    return ret;
+}
+
 static int test__parse_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
     int ret1, ret2 = 0;
@@ -2313,6 +2358,10 @@ do {                                                        \
         return ret;
     }
 
+    ret1 = test_pmu_events_alias2();
+    if (!ret2)
+        ret2 = ret1;
+
     ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms));
     if (!ret2)
         ret2 = ret1;
...
@@ -143,6 +143,34 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
     .matching_pmu = "uncore_cbox_0",
 };
 
+static const struct perf_pmu_test_event uncore_hyphen = {
+    .event = {
+        .name = "event-hyphen",
+        .event = "umask=0x00,event=0xe0",
+        .desc = "Unit: uncore_cbox UNC_CBO_HYPHEN",
+        .topic = "uncore",
+        .long_desc = "UNC_CBO_HYPHEN",
+        .pmu = "uncore_cbox",
+    },
+    .alias_str = "umask=0,event=0xe0",
+    .alias_long_desc = "UNC_CBO_HYPHEN",
+    .matching_pmu = "uncore_cbox_0",
+};
+
+static const struct perf_pmu_test_event uncore_two_hyph = {
+    .event = {
+        .name = "event-two-hyph",
+        .event = "umask=0x00,event=0xc0",
+        .desc = "Unit: uncore_cbox UNC_CBO_TWO_HYPH",
+        .topic = "uncore",
+        .long_desc = "UNC_CBO_TWO_HYPH",
+        .pmu = "uncore_cbox",
+    },
+    .alias_str = "umask=0,event=0xc0",
+    .alias_long_desc = "UNC_CBO_TWO_HYPH",
+    .matching_pmu = "uncore_cbox_0",
+};
+
 static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
     .event = {
         .name = "uncore_hisi_l3c.rd_hit_cpipe",
@@ -188,6 +216,8 @@ static const struct perf_pmu_test_event uncore_imc_cache_hits = {
 static const struct perf_pmu_test_event *uncore_events[] = {
     &uncore_hisi_ddrc_flux_wcmd,
     &unc_cbo_xsnp_response_miss_eviction,
+    &uncore_hyphen,
+    &uncore_two_hyph,
     &uncore_hisi_l3c_rd_hit_cpipe,
     &uncore_imc_free_running_cache_miss,
     &uncore_imc_cache_hits,
@@ -654,6 +684,8 @@ static struct perf_pmu_test_pmu test_pmus[] = {
         },
         .aliases = {
             &unc_cbo_xsnp_response_miss_eviction,
+            &uncore_hyphen,
+            &uncore_two_hyph,
         },
     },
     {
...
@@ -122,44 +122,48 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
     }
 
     // Test that CPU ID contains socket, die, core and CPU
-    for (i = 0; i < map->nr; i++) {
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
         id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL);
-        TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", map->map[i].cpu == id.cpu.cpu);
+        TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match",
+                        perf_cpu_map__cpu(map, i).cpu == id.cpu.cpu);
 
         TEST_ASSERT_VAL("Cpu map - Core ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].core_id == id.core);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
         TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+            id.socket);
 
         TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
         TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
         TEST_ASSERT_VAL("Cpu map - Thread is set", id.thread == -1);
     }
 
     // Test that core ID contains socket, die and core
-    for (i = 0; i < map->nr; i++) {
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
         id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL);
         TEST_ASSERT_VAL("Core map - Core ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].core_id == id.core);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
 
         TEST_ASSERT_VAL("Core map - Socket ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+            id.socket);
 
         TEST_ASSERT_VAL("Core map - Die ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
         TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
         TEST_ASSERT_VAL("Core map - Thread is set", id.thread == -1);
     }
 
     // Test that die ID contains socket and die
-    for (i = 0; i < map->nr; i++) {
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
         id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL);
         TEST_ASSERT_VAL("Die map - Socket ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+            id.socket);
 
         TEST_ASSERT_VAL("Die map - Die ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
         TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
         TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
@@ -168,10 +172,11 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
     }
 
     // Test that socket ID contains only socket
-    for (i = 0; i < map->nr; i++) {
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
         id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL);
         TEST_ASSERT_VAL("Socket map - Socket ID doesn't match",
-            session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+            session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+            id.socket);
 
         TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
         TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
@@ -181,10 +186,10 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
     }
 
     // Test that node ID contains only node
-    for (i = 0; i < map->nr; i++) {
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
         id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL);
         TEST_ASSERT_VAL("Node map - Node ID doesn't match",
-            cpu__get_node(map->map[i]) == id.node);
+            cpu__get_node(perf_cpu_map__cpu(map, i)) == id.node);
         TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1);
         TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1);
         TEST_ASSERT_VAL("Node map - Core is set", id.core == -1);
...
@@ -62,7 +62,7 @@ void affinity__set(struct affinity *a, int cpu)
     clear_bit(cpu, a->sched_cpus);
 }
 
-void affinity__cleanup(struct affinity *a)
+static void __affinity__cleanup(struct affinity *a)
 {
     int cpu_set_size = get_cpu_set_size();
@@ -71,3 +71,9 @@ void affinity__cleanup(struct affinity *a)
     zfree(&a->sched_cpus);
     zfree(&a->orig_cpus);
 }
+
+void affinity__cleanup(struct affinity *a)
+{
+    if (a != NULL)
+        __affinity__cleanup(a);
+}
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
     mp->idx = idx;
 
     if (per_cpu) {
-        mp->cpu = evlist->core.cpus->map[idx];
+        mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
         if (evlist->core.threads)
             mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
         else
...
@@ -61,7 +61,7 @@ int evsel__alloc_counts(struct evsel *evsel)
     struct perf_cpu_map *cpus = evsel__cpus(evsel);
     int nthreads = perf_thread_map__nr(evsel->core.threads);
 
-    evsel->counts = perf_counts__new(cpus ? cpus->nr : 1, nthreads);
+    evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);
     return evsel->counts != NULL ? 0 : -ENOMEM;
 }
...
@@ -4,7 +4,6 @@
 
 #include <stdbool.h>
 #include <stdio.h>
-#include <stdbool.h>
 #include <internal/cpumap.h>
 #include <perf/cpumap.h>
@@ -57,7 +56,7 @@ struct perf_cpu cpu__max_present_cpu(void);
  */
 static inline bool cpu_map__is_dummy(struct perf_cpu_map *cpus)
 {
-    return cpus->nr == 1 && cpus->map[0].cpu == -1;
+    return perf_cpu_map__nr(cpus) == 1 && perf_cpu_map__cpu(cpus, 0).cpu == -1;
 }
 
 /**
...
@@ -325,7 +325,7 @@ struct numa_topology *numa_topology__new(void)
     if (!node_map)
         goto out;
 
-    nr = (u32) node_map->nr;
+    nr = (u32) perf_cpu_map__nr(node_map);
 
     tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
     if (!tp)
@@ -334,7 +334,7 @@ struct numa_topology *numa_topology__new(void)
     tp->nr = nr;
 
     for (i = 0; i < nr; i++) {
-        if (load_numa_node(&tp->nodes[i], node_map->map[i].cpu)) {
+        if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
             numa_topology__delete(tp);
             tp = NULL;
             break;
...
@@ -124,22 +124,23 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
         events_nr++;
 
-        if (matched_cpus->nr > 0 && (unmatched_cpus->nr > 0 ||
-            matched_cpus->nr < cpus->nr ||
-            matched_cpus->nr < pmu->cpus->nr)) {
+        if (perf_cpu_map__nr(matched_cpus) > 0 &&
+            (perf_cpu_map__nr(unmatched_cpus) > 0 ||
+             perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
+             perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
             perf_cpu_map__put(evsel->core.cpus);
             perf_cpu_map__put(evsel->core.own_cpus);
             evsel->core.cpus = perf_cpu_map__get(matched_cpus);
             evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);
 
-            if (unmatched_cpus->nr > 0) {
+            if (perf_cpu_map__nr(unmatched_cpus) > 0) {
                 cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
                 pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
                            buf1, pmu->name, evsel->name);
             }
         }
 
-        if (matched_cpus->nr == 0) {
+        if (perf_cpu_map__nr(matched_cpus) == 0) {
             evlist__remove(evlist, evsel);
             evsel__delete(evsel);
...
@@ -430,15 +430,19 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 {
     struct evsel *pos;
     struct evlist_cpu_iterator evlist_cpu_itr;
-    struct affinity affinity;
+    struct affinity saved_affinity, *affinity = NULL;
     bool has_imm = false;
 
-    if (affinity__setup(&affinity) < 0)
-        return;
+    // See explanation in evlist__close()
+    if (!cpu_map__is_dummy(evlist->core.cpus)) {
+        if (affinity__setup(&saved_affinity) < 0)
+            return;
+        affinity = &saved_affinity;
+    }
 
     /* Disable 'immediate' events last */
     for (int imm = 0; imm <= 1; imm++) {
-        evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
+        evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
             pos = evlist_cpu_itr.evsel;
             if (evsel__strcmp(pos, evsel_name))
                 continue;
@@ -454,7 +458,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
             break;
     }
 
-    affinity__cleanup(&affinity);
+    affinity__cleanup(affinity);
     evlist__for_each_entry(evlist, pos) {
         if (evsel__strcmp(pos, evsel_name))
             continue;
@@ -487,12 +491,16 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 {
     struct evsel *pos;
     struct evlist_cpu_iterator evlist_cpu_itr;
-    struct affinity affinity;
+    struct affinity saved_affinity, *affinity = NULL;
 
-    if (affinity__setup(&affinity) < 0)
-        return;
+    // See explanation in evlist__close()
+    if (!cpu_map__is_dummy(evlist->core.cpus)) {
+        if (affinity__setup(&saved_affinity) < 0)
+            return;
+        affinity = &saved_affinity;
+    }
 
-    evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
+    evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
         pos = evlist_cpu_itr.evsel;
         if (evsel__strcmp(pos, evsel_name))
             continue;
@@ -500,7 +508,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
             continue;
         evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
     }
-    affinity__cleanup(&affinity);
+    affinity__cleanup(affinity);
     evlist__for_each_entry(evlist, pos) {
         if (evsel__strcmp(pos, evsel_name))
             continue;
...
@@ -1064,6 +1064,17 @@ void __weak arch_evsel__fixup_new_cycles(struct perf_event_attr *attr __maybe_un
 {
 }
 
+static void evsel__set_default_freq_period(struct record_opts *opts,
+                                           struct perf_event_attr *attr)
+{
+    if (opts->freq) {
+        attr->freq = 1;
+        attr->sample_freq = opts->freq;
+    } else {
+        attr->sample_period = opts->default_interval;
+    }
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -1130,14 +1141,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
      * We default some events to have a default interval. But keep
      * it a weak assumption overridable by the user.
      */
-    if (!attr->sample_period) {
-        if (opts->freq) {
-            attr->freq = 1;
-            attr->sample_freq = opts->freq;
-        } else {
-            attr->sample_period = opts->default_interval;
-        }
-    }
+    if ((evsel->is_libpfm_event && !attr->sample_period) ||
+        (!evsel->is_libpfm_event && (!attr->sample_period ||
+                                     opts->user_freq != UINT_MAX ||
+                                     opts->user_interval != ULLONG_MAX)))
+        evsel__set_default_freq_period(opts, attr);
 
     /*
      * If attr->freq was set (here or earlier), ask for period
      * to be sampled.
@@ -1782,7 +1791,7 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
         nthreads = threads->nr;
 
     if (evsel->core.fd == NULL &&
-        perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
+        perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
         return -ENOMEM;
 
     evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
@@ -2020,9 +2029,10 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
             test_attr__ready();
 
             pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
-                pid, cpus->map[idx].cpu, group_fd, evsel->open_flags);
+                pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
 
-            fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx].cpu,
+            fd = sys_perf_event_open(&evsel->core.attr, pid,
+                                     perf_cpu_map__cpu(cpus, idx).cpu,
                                      group_fd, evsel->open_flags);
 
             FD(evsel, idx, thread) = fd;
@@ -2038,7 +2048,8 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
             bpf_counter__install_pe(evsel, idx, fd);
 
             if (unlikely(test_attr__enabled)) {
-                test_attr__open(&evsel->core.attr, pid, cpus->map[idx],
+                test_attr__open(&evsel->core.attr, pid,
+                                perf_cpu_map__cpu(cpus, idx),
                                 fd, group_fd, evsel->open_flags);
             }
@@ -2079,7 +2090,8 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
         if (evsel__precise_ip_fallback(evsel))
             goto retry_open;
 
-        if (evsel__ignore_missing_thread(evsel, cpus->nr, idx, threads, thread, err)) {
+        if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
+                                         idx, threads, thread, err)) {
             /* We just removed 1 thread, so lower the upper nthreads limit. */
             nthreads--;
@@ -2119,7 +2131,7 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
                 struct perf_thread_map *threads)
 {
-    return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+    return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
 }
 
 void evsel__close(struct evsel *evsel)
@@ -2131,8 +2143,7 @@ void evsel__close(struct evsel *evsel)
 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
 {
     if (cpu_map_idx == -1)
-        return evsel__open_cpu(evsel, cpus, NULL, 0,
-                               cpus ? cpus->nr : 1);
+        return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
 
     return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
 }
@@ -2982,7 +2993,7 @@ int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
     struct perf_cpu_map *cpus = evsel->core.cpus;
     struct perf_thread_map *threads = evsel->core.threads;
 
-    if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
+    if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
         return -ENOMEM;
 
     return store_evsel_ids(evsel, evlist);
...
@@ -11,6 +11,7 @@
 #include <perf/evsel.h>
 #include "symbol_conf.h"
 #include <internal/cpumap.h>
+#include <perf/cpumap.h>
 
 struct bpf_object;
 struct cgroup;
@@ -191,7 +192,7 @@ static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
 
 static inline int evsel__nr_cpus(struct evsel *evsel)
 {
-    return evsel__cpus(evsel)->nr;
+    return perf_cpu_map__nr(evsel__cpus(evsel));
 }
 
 void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
...
@@ -16,6 +16,7 @@
 #include "map_symbol.h"
 #include "branch.h"
 #include "mem-events.h"
+#include "path.h"
 #include "srcline.h"
 #include "symbol.h"
 #include "sort.h"
@@ -1416,7 +1417,7 @@ static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, i
         struct stat st;
 
         /*sshfs might return bad dent->d_type, so we have to stat*/
-        snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
+        path__join(path, sizeof(path), dir_name, dent->d_name);
         if (stat(path, &st))
             continue;
...
@@ -250,7 +250,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 
     nr_cpus = perf_cpu_map__nr(cpu_map);
     for (idx = 0; idx < nr_cpus; idx++) {
-        cpu = cpu_map->map[idx]; /* map c index to online cpu index */
+        cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
         if (cpu__get_node(cpu) == node)
             set_bit(cpu.cpu, mask->bits);
     }
...
@@ -1697,6 +1697,15 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
             }
         }
     }
 
+    if (parse_state->fake_pmu) {
+        if (!parse_events_add_pmu(parse_state, list, str, head,
+                                  true, true)) {
+            pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
+            ok++;
+        }
+    }
+
 out_err:
     if (ok)
         *listp = list;
@@ -2098,8 +2107,17 @@ static void perf_pmu__parse_init(void)
     pmu = NULL;
     while ((pmu = perf_pmu__scan(pmu)) != NULL) {
         list_for_each_entry(alias, &pmu->aliases, list) {
-            if (strchr(alias->name, '-'))
+            char *tmp = strchr(alias->name, '-');
+
+            if (tmp) {
+                char *tmp2 = NULL;
+
+                tmp2 = strchr(tmp + 1, '-');
                 len++;
+                if (tmp2)
+                    len++;
+            }
             len++;
         }
     }
@@ -2119,8 +2137,20 @@ static void perf_pmu__parse_init(void)
         list_for_each_entry(alias, &pmu->aliases, list) {
             struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
             char *tmp = strchr(alias->name, '-');
+            char *tmp2 = NULL;
 
-            if (tmp != NULL) {
+            if (tmp)
+                tmp2 = strchr(tmp + 1, '-');
+            if (tmp2) {
+                SET_SYMBOL(strndup(alias->name, tmp - alias->name),
+                           PMU_EVENT_SYMBOL_PREFIX);
+                p++;
+                tmp++;
+                SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
+                p++;
+                SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
+                len += 3;
+            } else if (tmp) {
                 SET_SYMBOL(strndup(alias->name, tmp - alias->name),
                            PMU_EVENT_SYMBOL_PREFIX);
                 p++;
@@ -2147,23 +2177,38 @@ static void perf_pmu__parse_init(void)
  */
 int perf_pmu__test_parse_init(void)
 {
-    struct perf_pmu_event_symbol *list;
+    struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
+        {(char *)"read", PMU_EVENT_SYMBOL},
+        {(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
+        {(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
+        {(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
+        {(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
+    };
+    unsigned long i, j;
 
-    list = malloc(sizeof(*list) * 1);
+    tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
     if (!list)
         return -ENOMEM;
 
-    list->type = PMU_EVENT_SYMBOL;
-    list->symbol = strdup("read");
-
-    if (!list->symbol) {
-        free(list);
-        return -ENOMEM;
+    for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
+        tmp->type = symbols[i].type;
+        tmp->symbol = strdup(symbols[i].symbol);
+        if (!list->symbol)
+            goto err_free;
     }
 
     perf_pmu_events_list = list;
-    perf_pmu_events_list_num = 1;
+    perf_pmu_events_list_num = ARRAY_SIZE(symbols);
+
+    qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
+          sizeof(struct perf_pmu_event_symbol), comp_pmu);
+
     return 0;
+
+err_free:
+    for (j = 0, tmp = list; j < i; j++, tmp++)
+        free(tmp->symbol);
+    free(list);
+    return -ENOMEM;
 }
 
 enum perf_pmu_event_symbol_type
...
@@ -53,6 +53,7 @@ enum perf_pmu_event_symbol_type {
     PMU_EVENT_SYMBOL,        /* normal style PMU event */
     PMU_EVENT_SYMBOL_PREFIX, /* prefix of pre-suf style event */
     PMU_EVENT_SYMBOL_SUFFIX, /* suffix of pre-suf style event */
+    PMU_EVENT_SYMBOL_SUFFIX2, /* suffix of pre-suf2 style event */
 };
 
 struct perf_pmu_event_symbol {
...
@@ -149,6 +149,8 @@ static int pmu_str_check(yyscan_t scanner, struct parse_events_state *parse_stat
             return PE_PMU_EVENT_PRE;
         case PMU_EVENT_SYMBOL_SUFFIX:
             return PE_PMU_EVENT_SUF;
+        case PMU_EVENT_SYMBOL_SUFFIX2:
+            return PE_PMU_EVENT_SUF2;
         case PMU_EVENT_SYMBOL:
             return parse_state->fake_pmu
                 ? PE_PMU_EVENT_FAKE : PE_KERNEL_PMU_EVENT;
...
@@ -69,7 +69,7 @@ static void inc_group_count(struct list_head *list,
 %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
 %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
 %token PE_ERROR
-%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_PMU_EVENT_SUF2 PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
 %token PE_ARRAY_ALL PE_ARRAY_RANGE
 %token PE_DRV_CFG_TERM
 %type <num> PE_VALUE
@@ -87,7 +87,7 @@ static void inc_group_count(struct list_head *list,
 %type <str> PE_MODIFIER_EVENT
 %type <str> PE_MODIFIER_BP
 %type <str> PE_EVENT_NAME
-%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
+%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_PMU_EVENT_SUF2 PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
 %type <str> PE_DRV_CFG_TERM
 %type <str> event_pmu_name
 %destructor { free ($$); } <str>
@@ -372,6 +372,19 @@ PE_KERNEL_PMU_EVENT opt_pmu_config
     $$ = list;
 }
 |
+PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF '-' PE_PMU_EVENT_SUF2 sep_dc
+{
+    struct list_head *list;
+    char pmu_name[128];
+
+    snprintf(pmu_name, sizeof(pmu_name), "%s-%s-%s", $1, $3, $5);
+    free($1);
+    free($3);
+    free($5);
+    if (parse_events_multi_pmu_add(_parse_state, pmu_name, NULL, &list) < 0)
+        YYABORT;
+    $$ = list;
+}
+|
 PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
 {
     struct list_head *list;
...
@@ -67,7 +67,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
     cpus = perf_cpu_map__new(NULL);
     if (!cpus)
         return false;
-    cpu = cpus->map[0];
+    cpu = perf_cpu_map__cpu(cpus, 0);
     perf_cpu_map__put(cpus);
 
     do {
@@ -144,7 +144,7 @@ bool perf_can_record_cpu_wide(void)
     if (!cpus)
         return false;
-    cpu = cpus->map[0];
+    cpu = perf_cpu_map__cpu(cpus, 0);
     perf_cpu_map__put(cpus);
 
     fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
...
@@ -3083,6 +3083,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
     for (j = 0; j < num_matched_functions; j++) {
         sym = syms[j];
 
+        if (sym->type != STT_FUNC)
+            continue;
+
         /* There can be duplicated symbols in the map */
         for (i = 0; i < j; i++)
             if (sym->start == syms[i]->start) {
...
@@ -638,17 +638,17 @@ static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
 {
     struct pyrf_cpu_map *pcpus = (void *)obj;
 
-    return pcpus->cpus->nr;
+    return perf_cpu_map__nr(pcpus->cpus);
 }
 
 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
 {
     struct pyrf_cpu_map *pcpus = (void *)obj;
 
-    if (i >= pcpus->cpus->nr)
+    if (i >= perf_cpu_map__nr(pcpus->cpus))
         return NULL;
 
-    return Py_BuildValue("i", pcpus->cpus->map[i]);
+    return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
 }
 
 static PySequenceMethods pyrf_cpu_map__sequence_methods = {
...
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
     if (opts->group)
         evlist__set_leader(evlist);
 
-    if (evlist->core.cpus->map[0].cpu < 0)
+    if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
         opts->no_inherit = true;
 
     use_comm_exec = perf_can_comm_exec();
@@ -248,11 +248,11 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
         struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
         if (cpus)
-            cpu = cpus->map[0];
+            cpu = perf_cpu_map__cpu(cpus, 0);
         perf_cpu_map__put(cpus);
     } else {
-        cpu = evlist->core.cpus->map[0];
+        cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
     }
 
     while (1) {
...
@@ -1607,8 +1607,8 @@ static void python_process_stat(struct perf_stat_config *config,
     }
 
     for (thread = 0; thread < threads->nr; thread++) {
-        for (cpu = 0; cpu < cpus->nr; cpu++) {
-            process_stat(counter, cpus->map[cpu],
+        for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
+            process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
                          perf_thread_map__pid(threads, thread), tstamp,
                          perf_counts(counter->counts, cpu, thread));
         }
...
@@ -2537,8 +2537,8 @@ int perf_session__cpu_bitmap(struct perf_session *session,
         return -1;
     }
 
-    for (i = 0; i < map->nr; i++) {
-        struct perf_cpu cpu = map->map[i];
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
+        struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
 
         if (cpu.cpu >= nr_cpus) {
             pr_err("Requested CPU %d too large. "
...
@@ -734,8 +734,8 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
     if (!m)
         return -1;
 
-    for (i = 0; i < m->nr; i++) {
-        c = m->map[i];
+    for (i = 0; i < perf_cpu_map__nr(m); i++) {
+        c = perf_cpu_map__cpu(m, i);
 
         if (c.cpu >= nr_cpus) {
             ret = -1;
             break;
...
@@ -1186,12 +1186,12 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 static void synthesize_cpus(struct cpu_map_entries *cpus,
                             struct perf_cpu_map *map)
 {
-    int i;
+    int i, map_nr = perf_cpu_map__nr(map);
 
-    cpus->nr = map->nr;
+    cpus->nr = map_nr;
 
-    for (i = 0; i < map->nr; i++)
-        cpus->cpu[i] = map->map[i].cpu;
+    for (i = 0; i < map_nr; i++)
+        cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
 }
 
 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
@@ -1202,13 +1202,13 @@ static void synthesize_mask(struct perf_record_record_cpu_map *mask,
     mask->nr = BITS_TO_LONGS(max);
     mask->long_size = sizeof(long);
 
-    for (i = 0; i < map->nr; i++)
-        set_bit(map->map[i].cpu, mask->mask);
+    for (i = 0; i < perf_cpu_map__nr(map); i++)
+        set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
 }
 
 static size_t cpus_size(struct perf_cpu_map *map)
 {
-    return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+    return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
 }
 
 static size_t mask_size(struct perf_cpu_map *map, int *max)
@@ -1217,9 +1217,9 @@ static size_t mask_size(struct perf_cpu_map *map, int *max)
     *max = 0;
 
-    for (i = 0; i < map->nr; i++) {
+    for (i = 0; i < perf_cpu_map__nr(map); i++) {
         /* bit position of the cpu is + 1 */
-        int bit = map->map[i].cpu + 1;
+        int bit = perf_cpu_map__cpu(map, i).cpu + 1;
 
         if (bit > *max)
             *max = bit;
...
@@ -95,15 +95,15 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 
     if (target->cpu_list)
         ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-                        top->evlist->core.cpus->nr > 1 ? "s" : "",
+                        perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
                         target->cpu_list);
     else {
         if (target->tid)
             ret += SNPRINTF(bf + ret, size - ret, ")");
         else
             ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-                            top->evlist->core.cpus->nr,
-                            top->evlist->core.cpus->nr > 1 ? "s" : "");
+                            perf_cpu_map__nr(top->evlist->core.cpus),
+                            perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
     }
 
     perf_top__reset_sample_counters(top);
...