Commit b1433395 authored by Peter Zijlstra

sched,rcuperf: Convert to sched_set_fifo_low()

Because SCHED_FIFO is a broken scheduler model (see previous patches),
take away the priority field; the kernel can't possibly make an
informed decision.

Effectively no change.

Cc: paulmck@kernel.org
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
parent 93db9129
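
For context, here is a minimal sketch (not part of this patch) of the conversion pattern, using a hypothetical kthread body my_kthread_fn(); sched_set_fifo_low() and sched_set_normal() are the helpers introduced earlier in this series and declared in <linux/sched.h>:

	#include <linux/kernel.h>
	#include <linux/sched.h>

	/* Hypothetical kthread body illustrating the before/after pattern. */
	static int my_kthread_fn(void *arg)
	{
		/*
		 * Old style: the caller picks a raw RT priority itself, e.g.
		 *
		 *	struct sched_param sp = { .sched_priority = 1 };
		 *	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
		 */

		/* New style: ask for "lowest FIFO"; the kernel owns the number. */
		sched_set_fifo_low(current);

		/* ... time-critical part of the work ... */

		/* Drop back to SCHED_NORMAL at nice 0 once the RT phase is over. */
		sched_set_normal(current, 0);

		return 0;
	}
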
@@ -354,7 +354,6 @@ rcu_perf_writer(void *arg)
 	int i_max;
 	long me = (long)arg;
 	struct rcu_head *rhp = NULL;
-	struct sched_param sp;
 	bool started = false, done = false, alldone = false;
 	u64 t;
 	u64 *wdp;
@@ -363,8 +362,7 @@ rcu_perf_writer(void *arg)
 	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
 	WARN_ON(!wdpp);
 	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
-	sp.sched_priority = 1;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+	sched_set_fifo_low(current);
 	if (holdoff)
 		schedule_timeout_uninterruptible(holdoff * HZ);
@@ -420,9 +418,7 @@ rcu_perf_writer(void *arg)
 		started = true;
 		if (!done && i >= MIN_MEAS) {
 			done = true;
-			sp.sched_priority = 0;
-			sched_setscheduler_nocheck(current,
-						   SCHED_NORMAL, &sp);
+			sched_set_normal(current, 0);
 			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
 				 perf_type, PERF_FLAG, me, MIN_MEAS);
 			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
...