Commit 299687e1 authored by Athira Rajeev, committed by Arnaldo Carvalho de Melo

perf bench: Fix epoll bench to correct usage of affinity for machines with #CPUs > 1K

The 'perf bench epoll' testcase fails on systems with more than 1K CPUs.

Testcase: perf bench epoll all

Result snippet:
<<>>
Run summary [PID 106497]: 1399 threads monitoring on 64 file-descriptors for 8 secs.

perf: pthread_create: No such file or directory
<<>>

In the epoll benchmarks (ctl, wait), pthread_create is invoked in do_threads
from the respective bench_epoll_* function. Though the log shows the failure
coming directly from pthread_create, the actual failure is from
"sched_setaffinity" returning EINVAL (invalid argument).

This happens because the default mask size in glibc is 1024. To overcome
the 1024-CPU mask size limitation of cpu_set_t, change the mask size
using the CPU_*_S macros.

This patch addresses the issue by fixing all the epoll benchmarks to use
CPU_ALLOC to allocate the cpumask, CPU_ALLOC_SIZE for its size, and
CPU_SET_S to set the mask.
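
For reference, a minimal standalone sketch (not taken from the patch) of the dynamically sized mask pattern the commit message describes: CPU_ALLOC/CPU_ALLOC_SIZE plus the CPU_*_S macros, here applied to a plain sched_setaffinity() call on the current thread rather than the benchmark's pthread_attr_setaffinity_np() path. The nrcpus and target_cpu values are illustrative.

/* Sketch only: dynamically sized CPU mask, safe for systems with > 1024 CPUs. */
#define _GNU_SOURCE
#include <err.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        int nrcpus = sysconf(_SC_NPROCESSORS_CONF);   /* may exceed 1024 */
        int target_cpu = 0;                           /* illustrative choice */
        cpu_set_t *cpuset;
        size_t size;

        cpuset = CPU_ALLOC(nrcpus);                   /* mask sized for nrcpus */
        if (!cpuset)
                err(EXIT_FAILURE, "CPU_ALLOC");
        size = CPU_ALLOC_SIZE(nrcpus);                /* byte size for the *_S macros */

        CPU_ZERO_S(size, cpuset);
        CPU_SET_S(target_cpu, size, cpuset);

        /* Pass the real mask size, not sizeof(cpu_set_t). */
        if (sched_setaffinity(0, size, cpuset)) {
                CPU_FREE(cpuset);
                err(EXIT_FAILURE, "sched_setaffinity");
        }

        CPU_FREE(cpuset);
        return 0;
}
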
Reported-by: Disha Goel <disgoel@linux.vnet.ibm.com>
Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Tested-by: Disha Goel <disgoel@linux.vnet.ibm.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nageswara R Sastry <rnsastry@linux.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: https://lore.kernel.org/r/20220406175113.87881-3-atrajeev@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent c9c2a427
@@ -222,13 +222,20 @@ static void init_fdmaps(struct worker *w, int pct)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
         pthread_attr_t thread_attr, *attrp = NULL;
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         unsigned int i, j;
         int ret = 0;
+        int nrcpus;
+        size_t size;

         if (!noaffinity)
                 pthread_attr_init(&thread_attr);

+        nrcpus = perf_cpu_map__nr(cpu);
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         for (i = 0; i < nthreads; i++) {
                 struct worker *w = &worker[i];

@@ -252,22 +259,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
                         init_fdmaps(w, 50);

                 if (!noaffinity) {
-                        CPU_ZERO(&cpuset);
-                        CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+                        CPU_ZERO_S(size, cpuset);
+                        CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+                                        size, cpuset);

-                        ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                        if (ret)
+                        ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                        if (ret) {
+                                CPU_FREE(cpuset);
                                 err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                        }

                         attrp = &thread_attr;
                 }

                 ret = pthread_create(&w->thread, attrp, workerfn,
                                      (void *)(struct worker *) w);
-                if (ret)
+                if (ret) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
         }

+        CPU_FREE(cpuset);
         if (!noaffinity)
                 pthread_attr_destroy(&thread_attr);

@@ -291,9 +291,11 @@ static void print_summary(void)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
         pthread_attr_t thread_attr, *attrp = NULL;
-        cpu_set_t cpuset;
+        cpu_set_t *cpuset;
         unsigned int i, j;
         int ret = 0, events = EPOLLIN;
+        int nrcpus;
+        size_t size;

         if (oneshot)
                 events |= EPOLLONESHOT;
@@ -306,6 +308,11 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
         if (!noaffinity)
                 pthread_attr_init(&thread_attr);

+        nrcpus = perf_cpu_map__nr(cpu);
+        cpuset = CPU_ALLOC(nrcpus);
+        BUG_ON(!cpuset);
+        size = CPU_ALLOC_SIZE(nrcpus);
+
         for (i = 0; i < nthreads; i++) {
                 struct worker *w = &worker[i];

@@ -341,22 +348,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
                 }

                 if (!noaffinity) {
-                        CPU_ZERO(&cpuset);
-                        CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+                        CPU_ZERO_S(size, cpuset);
+                        CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+                                        size, cpuset);

-                        ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                        if (ret)
+                        ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                        if (ret) {
+                                CPU_FREE(cpuset);
                                 err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                        }

                         attrp = &thread_attr;
                 }

                 ret = pthread_create(&w->thread, attrp, workerfn,
                                      (void *)(struct worker *) w);
-                if (ret)
+                if (ret) {
+                        CPU_FREE(cpuset);
                         err(EXIT_FAILURE, "pthread_create");
+                }
         }

+        CPU_FREE(cpuset);
         if (!noaffinity)
                 pthread_attr_destroy(&thread_attr);
