Commit d53271c0 authored by Mathieu Desnoyers, committed by Shuah Khan

selftests/rseq: Do not skip !allowed_cpus for mm_cid

Indexing with mm_cid is incompatible with skipping CPUs that are excluded
from the allowed cpumask, because concurrency IDs come from a virtual ID
allocation which is unrelated to the physical CPU mask.

The issue can be reproduced by running the rseq selftests under a
taskset which excludes CPU 0, e.g.

  taskset -c 10-20 ./run_param_test.sh
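
The guard pattern introduced here can be illustrated outside the selftests.
Below is a minimal, self-contained C sketch, not the selftests' own code:
use_cpu_index() and the USE_MM_CID macro are illustrative stand-ins for the
rseq_use_cpu_index() helper added by this patch and for the build-time choice
between mm_cid and cpu_id indexing. Disallowed CPUs are only skipped when
indexing by raw cpu_id; with mm_cid, every index must be populated because
the virtual IDs are handed out independently of the affinity mask:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  static bool use_cpu_index(void)
  {
  #ifdef USE_MM_CID
  	return false;	/* mm_cid: virtual IDs, ignore the cpumask. */
  #else
  	return true;	/* cpu_id: physical CPUs, honour the cpumask. */
  #endif
  }

  int main(void)
  {
  	cpu_set_t allowed_cpus;
  	static int *slot[CPU_SETSIZE];
  	int i, populated = 0;

  	if (sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus))
  		return EXIT_FAILURE;
  	for (i = 0; i < CPU_SETSIZE; i++) {
  		/* Only skip disallowed CPUs when indexing by cpu_id. */
  		if (use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
  			continue;
  		slot[i] = calloc(1, sizeof(*slot[i]));
  		if (!slot[i])
  			return EXIT_FAILURE;
  		populated++;
  	}
  	printf("populated %d of %d indexes\n", populated, CPU_SETSIZE);
  	for (i = 0; i < CPU_SETSIZE; i++)
  		free(slot[i]);
  	return EXIT_SUCCESS;
  }

Compiled with and without -DUSE_MM_CID and run under an excluding taskset
(e.g. taskset -c 10-20), the sketch populates all CPU_SETSIZE slots in the
mm_cid configuration and only the allowed CPUs' slots otherwise.
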
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
parent 6613476e
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return false;	/* Use mm_cid */
+}
 #else
 # define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
 static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return true;	/* Use cpu_id as index. */
+}
 #endif
 
 struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		for (j = 1; j <= 100; j++) {
 			struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_list_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		while ((node = __percpu_list_pop(&list, i))) {
...
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return false;	/* Use mm_cid */
+}
 # ifdef TEST_MEMBARRIER
 /*
  * Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return true;	/* Use cpu_id as index. */
+}
 # ifdef TEST_MEMBARRIER
 static
 int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		for (j = 1; j <= 100; j++) {
 			struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_list_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		/* Worse-case is every item in same CPU. */
 		buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_buffer_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		/* Worse-case is every item in same CPU. */
 		buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_memcpy_buffer_node item;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
...