Commit f10e00f4 authored by Kirill Tkhai's avatar Kirill Tkhai Committed by Ingo Molnar

sched/dl: Use dl_bw_of() under rcu_read_lock_sched()

rq->rd is freed using call_rcu_sched(), so rcu_read_lock() to access it
is not enough. We should use either rcu_read_lock_sched() or preempt_disable().
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Fixes: 66339c31 "sched: Use dl_bw_of() under RCU read lock"
Link: http://lkml.kernel.org/r/1412065417.20287.24.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 10a12983
...@@ -5264,6 +5264,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb, ...@@ -5264,6 +5264,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
{ {
unsigned long flags; unsigned long flags;
long cpu = (long)hcpu; long cpu = (long)hcpu;
struct dl_bw *dl_b;
switch (action & ~CPU_TASKS_FROZEN) { switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE:
...@@ -5271,15 +5272,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb, ...@@ -5271,15 +5272,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
/* explicitly allow suspend */ /* explicitly allow suspend */
if (!(action & CPU_TASKS_FROZEN)) { if (!(action & CPU_TASKS_FROZEN)) {
struct dl_bw *dl_b = dl_bw_of(cpu);
bool overflow; bool overflow;
int cpus; int cpus;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags); raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(cpu); cpus = dl_bw_cpus(cpu);
overflow = __dl_overflow(dl_b, cpus, 0, 0); overflow = __dl_overflow(dl_b, cpus, 0, 0);
raw_spin_unlock_irqrestore(&dl_b->lock, flags); raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
if (overflow) if (overflow)
return notifier_from_errno(-EBUSY); return notifier_from_errno(-EBUSY);
} }
...@@ -7647,11 +7652,10 @@ static int sched_dl_global_constraints(void) ...@@ -7647,11 +7652,10 @@ static int sched_dl_global_constraints(void)
u64 runtime = global_rt_runtime(); u64 runtime = global_rt_runtime();
u64 period = global_rt_period(); u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime); u64 new_bw = to_ratio(period, runtime);
struct dl_bw *dl_b;
int cpu, ret = 0; int cpu, ret = 0;
unsigned long flags; unsigned long flags;
rcu_read_lock();
/* /*
* Here we want to check the bandwidth not being set to some * Here we want to check the bandwidth not being set to some
* value smaller than the currently allocated bandwidth in * value smaller than the currently allocated bandwidth in
...@@ -7662,25 +7666,27 @@ static int sched_dl_global_constraints(void) ...@@ -7662,25 +7666,27 @@ static int sched_dl_global_constraints(void)
* solutions is welcome! * solutions is welcome!
*/ */
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct dl_bw *dl_b = dl_bw_of(cpu); rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags); raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw < dl_b->total_bw) if (new_bw < dl_b->total_bw)
ret = -EBUSY; ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags); raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
if (ret) if (ret)
break; break;
} }
rcu_read_unlock();
return ret; return ret;
} }
static void sched_dl_do_global(void) static void sched_dl_do_global(void)
{ {
u64 new_bw = -1; u64 new_bw = -1;
struct dl_bw *dl_b;
int cpu; int cpu;
unsigned long flags; unsigned long flags;
...@@ -7690,18 +7696,19 @@ static void sched_dl_do_global(void) ...@@ -7690,18 +7696,19 @@ static void sched_dl_do_global(void)
if (global_rt_runtime() != RUNTIME_INF) if (global_rt_runtime() != RUNTIME_INF)
new_bw = to_ratio(global_rt_period(), global_rt_runtime()); new_bw = to_ratio(global_rt_period(), global_rt_runtime());
rcu_read_lock();
/* /*
* FIXME: As above... * FIXME: As above...
*/ */
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct dl_bw *dl_b = dl_bw_of(cpu); rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags); raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw; dl_b->bw = new_bw;
raw_spin_unlock_irqrestore(&dl_b->lock, flags); raw_spin_unlock_irqrestore(&dl_b->lock, flags);
rcu_read_unlock_sched();
} }
rcu_read_unlock();
} }
static int sched_rt_global_validate(void) static int sched_rt_global_validate(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment