Commit 3c18d447 authored by Juri Lelli, committed by Ingo Molnar

sched/core: Check for available DL bandwidth in cpuset_cpu_inactive()

Hotplug operations are destructive w.r.t. cpusets. In case such an
operation is performed on a CPU belonging to an exclusive cpuset, the
DL bandwidth information associated with the corresponding root
domain is gone even if the operation fails (in sched_cpu_inactive()).

For this reason we need to move the check we currently have in
sched_cpu_inactive() to cpuset_cpu_inactive() to prevent useless
cpusets reconfiguration in the CPU_DOWN_FAILED path.
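
The check being moved is the DL admission test visible in the hunks below: dl_bw_of() fetches the root domain's deadline-bandwidth accounting, dl_bw_cpus() counts the CPUs still active in that root domain, and __dl_overflow() reports whether the bandwidth already reserved by SCHED_DEADLINE tasks would no longer fit, in which case the notifier returns -EBUSY and the hot-unplug is refused. What follows is a minimal userspace sketch of that test, not kernel code: struct dl_bw and dl_overflow() here are simplified stand-ins for the kernel's definitions, and the 95%/60% figures are purely illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct dl_bw: bandwidths use a
 * 20-bit fixed-point convention where 1.0 == (1 << 20). */
struct dl_bw {
	int64_t bw;		/* per-CPU cap; -1 means "no limit" */
	int64_t total_bw;	/* bandwidth already admitted on this root domain */
};

/* Mirrors __dl_overflow(dl_b, cpus, 0, 0): true when the admitted
 * bandwidth exceeds what the remaining CPUs can provide. */
static bool dl_overflow(const struct dl_bw *dl_b, int cpus)
{
	return dl_b->bw != -1 && dl_b->bw * cpus < dl_b->total_bw;
}

int main(void)
{
	/* 95% per CPU, mirroring the default rt_runtime/rt_period ratio. */
	struct dl_bw dl_b = { .bw = (95 << 20) / 100, .total_bw = 0 };

	/* Two admitted deadline tasks, each reserving 60% of a CPU. */
	dl_b.total_bw = 2 * ((60 << 20) / 100);

	/* Unplugging down to two CPUs still fits; down to one does not,
	 * so the CPU_DOWN_PREPARE notifier would return -EBUSY. */
	printf("2 CPUs remaining: %s\n", dl_overflow(&dl_b, 2) ? "refuse" : "allow");
	printf("1 CPU remaining:  %s\n", dl_overflow(&dl_b, 1) ? "refuse" : "allow");
	return 0;
}
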
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Link: http://lkml.kernel.org/r/1427792017-7356-2-git-send-email-juri.lelli@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4cd57f97
@@ -5337,36 +5337,13 @@ static int sched_cpu_active(struct notifier_block *nfb,
 static int sched_cpu_inactive(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	unsigned long flags;
-	long cpu = (long)hcpu;
-	struct dl_bw *dl_b;
-
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		set_cpu_active(cpu, false);
-
-		/* explicitly allow suspend */
-		if (!(action & CPU_TASKS_FROZEN)) {
-			bool overflow;
-			int cpus;
-
-			rcu_read_lock_sched();
-			dl_b = dl_bw_of(cpu);
-
-			raw_spin_lock_irqsave(&dl_b->lock, flags);
-			cpus = dl_bw_cpus(cpu);
-			overflow = __dl_overflow(dl_b, cpus, 0, 0);
-			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-			rcu_read_unlock_sched();
-
-			if (overflow)
-				return notifier_from_errno(-EBUSY);
-		}
-
+		set_cpu_active((long)hcpu, false);
 		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
 	}
+
+	return NOTIFY_DONE;
 }
 
 static int __init migration_init(void)
@@ -7006,7 +6983,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 	 */
 	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus(true);
 		break;
 	default:
@@ -7018,8 +6994,32 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action) {
+	unsigned long flags;
+	long cpu = (long)hcpu;
+	struct dl_bw *dl_b;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
+		/* explicitly allow suspend */
+		if (!(action & CPU_TASKS_FROZEN)) {
+			bool overflow;
+			int cpus;
+
+			rcu_read_lock_sched();
+			dl_b = dl_bw_of(cpu);
+
+			raw_spin_lock_irqsave(&dl_b->lock, flags);
+			cpus = dl_bw_cpus(cpu);
+			overflow = __dl_overflow(dl_b, cpus, 0, 0);
+			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+			rcu_read_unlock_sched();
+
+			if (overflow) {
+				trace_printk("hotplug failed for cpu %lu", cpu);
+				return notifier_from_errno(-EBUSY);
+			}
+		}
 		cpuset_update_active_cpus(false);
 		break;
 	case CPU_DOWN_PREPARE_FROZEN: