Commit e63da036 authored by Rik van Riel, committed by Ingo Molnar

sched/numa: Allow task switch if load imbalance improves

Currently the NUMA balancing code only allows moving tasks between NUMA
nodes when the load on both nodes is in balance. This breaks down when
the load is imbalanced to begin with.

Allow tasks to be moved between NUMA nodes if the imbalance is small,
or if the new imbalance is smaller than the original one.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: mgorman@suse.de
Cc: chegu_vinod@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: http://lkml.kernel.org/r/20140514132221.274b3463@annuminas.surriel.com
parent 4027d080
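Before the diff itself, a stand-alone user-space sketch may help make the new rule concrete. This is a minimal illustration, not the kernel code: the threshold is passed in directly instead of via struct task_numa_env, and the node loads, the task weight of 200, and the imbalance_pct value of 112 are all assumed sample numbers.

#include <stdbool.h>
#include <stdio.h>

#define swap(a, b) do { long __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

static bool load_too_imbalanced(long orig_src_load, long orig_dst_load,
				long src_load, long dst_load,
				long imbalance_pct)
{
	long imb, old_imb;

	/* Only the magnitude of the imbalance matters, not its direction. */
	if (dst_load < src_load)
		swap(dst_load, src_load);

	/* New imbalance within the threshold: allow the move. */
	imb = dst_load * 100 - src_load * imbalance_pct;
	if (imb <= 0)
		return false;

	/*
	 * Above the threshold: allow the move only if it shrinks the
	 * imbalance compared to the original placement.
	 */
	if (orig_dst_load < orig_src_load)
		swap(orig_dst_load, orig_src_load);

	old_imb = orig_dst_load * 100 - orig_src_load * imbalance_pct;

	return imb > old_imb;
}

int main(void)
{
	/*
	 * The source node carries 1400, the destination 800.  Moving a
	 * task of weight 200 leaves 1200 vs 1000: still outside a 112%
	 * threshold (1200 * 100 > 1000 * 112), but a smaller gap than
	 * before, so the move is now allowed.  The old check,
	 * src_load * imbalance_pct < dst_load * 100, rejected it.
	 */
	printf("refuse move: %d\n",
	       load_too_imbalanced(1400, 800, 1200, 1000, 112)); /* 0 */
	return 0;
}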
@@ -1095,6 +1095,34 @@ static void task_numa_assign(struct task_numa_env *env,
 		env->best_cpu = env->dst_cpu;
 }
+static bool load_too_imbalanced(long orig_src_load, long orig_dst_load,
+				long src_load, long dst_load,
+				struct task_numa_env *env)
+{
+	long imb, old_imb;
+
+	/* We care about the slope of the imbalance, not the direction. */
+	if (dst_load < src_load)
+		swap(dst_load, src_load);
+
+	/* Is the difference below the threshold? */
+	imb = dst_load * 100 - src_load * env->imbalance_pct;
+	if (imb <= 0)
+		return false;
+
+	/*
+	 * The imbalance is above the allowed threshold.
+	 * Compare it with the old imbalance.
+	 */
+	if (orig_dst_load < orig_src_load)
+		swap(orig_dst_load, orig_src_load);
+
+	old_imb = orig_dst_load * 100 - orig_src_load * env->imbalance_pct;
+
+	/* Would this change make things worse? */
+	return (imb > old_imb);
+}
+
 /*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source tasks was migrated to the target dst_cpu taking
@@ -1107,7 +1135,8 @@ static void task_numa_compare(struct task_numa_env *env,
 	struct rq *src_rq = cpu_rq(env->src_cpu);
 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
 	struct task_struct *cur;
-	long dst_load, src_load;
+	long orig_src_load, src_load;
+	long orig_dst_load, dst_load;
 	long load;
 	long imp = (groupimp > 0) ? groupimp : taskimp;
@@ -1181,13 +1210,13 @@ static void task_numa_compare(struct task_numa_env *env,
 	 * In the overloaded case, try and keep the load balanced.
 	 */
 balance:
-	dst_load = env->dst_stats.load;
-	src_load = env->src_stats.load;
+	orig_dst_load = env->dst_stats.load;
+	orig_src_load = env->src_stats.load;

 	/* XXX missing power terms */
 	load = task_h_load(env->p);
-	dst_load += load;
-	src_load -= load;
+	dst_load = orig_dst_load + load;
+	src_load = orig_src_load - load;

 	if (cur) {
 		load = task_h_load(cur);
@@ -1195,11 +1224,8 @@ static void task_numa_compare(struct task_numa_env *env,
 		src_load += load;
 	}

-	/* make src_load the smaller */
-	if (dst_load < src_load)
-		swap(dst_load, src_load);
-
-	if (src_load * env->imbalance_pct < dst_load * 100)
+	if (load_too_imbalanced(orig_src_load, orig_dst_load,
+				src_load, dst_load, env))
 		goto unlock;

 assign:
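Note that the first branch of the new helper still covers what the old check allowed: if the post-move loads fall within the imbalance_pct threshold, the move is accepted without consulting the original loads at all. Continuing the illustrative sketch above, with assumed sample numbers:

	/*
	 * src drops from 1600 to 1000 and dst rises from 500 to 1100:
	 * 1100 * 100 - 1000 * 112 = -2000 <= 0, so the helper returns
	 * false in its first branch and the move is accepted regardless
	 * of how imbalanced the nodes were before.
	 */
	printf("refuse move: %d\n",
	       load_too_imbalanced(1600, 500, 1000, 1100, 112)); /* 0 */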