Commit 66339c31 authored by Kirill Tkhai, committed by Ingo Molnar

sched: Use dl_bw_of() under RCU read lock

dl_bw_of() dereferences rq->rd, which requires the RCU read lock to be
held. Without it, the probability of a use-after-free is not zero.

Also add a lockdep assert into dl_bw_cpus().
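
For illustration, a minimal sketch of the caller pattern this change
asserts; cpu and dl_b are hypothetical locals, and the locking mirrors
what the patch itself adds to sched_dl_global_constraints():

	struct dl_bw *dl_b;

	rcu_read_lock();		/* pin rq->rd against concurrent freeing */
	dl_b = dl_bw_of(cpu);		/* rq->rd dereference is now RCU-safe */
	/* ... inspect or update bandwidth under dl_b->lock ... */
	rcu_read_unlock();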
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org> # v3.14+
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140922183624.11015.71558.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7a96c231
@@ -2021,6 +2021,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -2029,6 +2031,8 @@ static inline int dl_bw_cpus(int i)
 	struct root_domain *rd = cpu_rq(i)->rd;
 	int cpus = 0;
 
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	for_each_cpu_and(i, rd->span, cpu_active_mask)
 		cpus++;
 
@@ -7645,6 +7649,8 @@ static int sched_dl_global_constraints(void)
 	int cpu, ret = 0;
 	unsigned long flags;
 
+	rcu_read_lock();
+
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7666,6 +7672,8 @@ static int sched_dl_global_constraints(void)
 		break;
 	}
 
+	rcu_read_unlock();
+
 	return ret;
 }
 
@@ -7681,6 +7689,7 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
+	rcu_read_lock();
 	/*
	 * FIXME: As above...
	 */
@@ -7691,6 +7700,7 @@ static void sched_dl_do_global(void)
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
+	rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)
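
As a usage note, rcu_lockdep_assert() in this kernel era is a no-op
unless CONFIG_PROVE_RCU is enabled (later kernels renamed it
RCU_LOCKDEP_WARN, with an inverted condition). A sketch of the failure
mode the assertion now catches, with a hypothetical unlocked caller:

	/* Buggy caller: no RCU read lock held around the rq->rd access.
	 * From a preemptible context with CONFIG_PROVE_RCU=y, the new
	 * assertion emits a lockdep splat quoting "sched RCU must be
	 * held" instead of silently risking a use-after-free.
	 */
	struct dl_bw *dl_b = dl_bw_of(raw_smp_processor_id());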