Commit f03542a7 authored by Alex Shi, committed by Thomas Gleixner

sched: recover SD_WAKE_AFFINE in select_task_rq_fair and code clean up

Since the power-saving code has been removed from the scheduler, the
implementation left in this function is dead code and even pollutes other
logic: for example, 'want_sd' never has a chance to be set to '0', which
removes the effect of SD_WAKE_AFFINE here.

So, clean up the obsolete code, including SD_PREFER_LOCAL.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/5028F431.6000306@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 78feefc5
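
The cleanup leaves the domain walk in select_task_rq_fair() in a much simpler
shape. As a reading aid, here is a consolidated sketch of the loop as it
stands after this patch, assembled from the fair.c hunks below; the
for_each_domain() header is surrounding context not shown in the diff,
reconstructed from the kernel source of this period:

	/*
	 * Post-patch shape of the domain walk in select_task_rq_fair(),
	 * assembled from the hunks below.
	 */
	for_each_domain(cpu, tmp) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target: remember the
		 * domain and stop walking.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			affine_sd = tmp;
			break;
		}

		if (tmp->flags & sd_flag)
			sd = tmp;
	}

Note that the break now sits inside the SD_WAKE_AFFINE test, so the walk
stops as soon as an affine candidate domain is found; this is what "recovers"
SD_WAKE_AFFINE after the dead want_sd bookkeeping is gone.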
@@ -860,7 +860,6 @@ enum cpu_idle_type {
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
@@ -129,7 +129,6 @@ int arch_update_cpu_topology(void);
 				| 1*SD_BALANCE_FORK			\
 				| 0*SD_BALANCE_WAKE			\
 				| 1*SD_WAKE_AFFINE			\
-				| 0*SD_PREFER_LOCAL			\
 				| 0*SD_SHARE_CPUPOWER			\
 				| 1*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
@@ -160,7 +159,6 @@ int arch_update_cpu_topology(void);
 				| 1*SD_BALANCE_FORK			\
 				| 0*SD_BALANCE_WAKE			\
 				| 1*SD_WAKE_AFFINE			\
-				| 0*SD_PREFER_LOCAL			\
 				| 0*SD_SHARE_CPUPOWER			\
 				| 0*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
@@ -6622,7 +6622,6 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 					| 0*SD_BALANCE_FORK
 					| 0*SD_BALANCE_WAKE
 					| 0*SD_WAKE_AFFINE
-					| 0*SD_PREFER_LOCAL
 					| 0*SD_SHARE_CPUPOWER
 					| 0*SD_SHARE_PKG_RESOURCES
 					| 1*SD_SERIALIZE
@@ -2686,7 +2686,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
 	int want_affine = 0;
-	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
 	if (p->nr_cpus_allowed == 1)
@@ -2703,27 +2702,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 		if (!(tmp->flags & SD_LOAD_BALANCE))
 			continue;
 
-		/*
-		 * If power savings logic is enabled for a domain, see if we
-		 * are not overloaded, if so, don't balance wider.
-		 */
-		if (tmp->flags & (SD_PREFER_LOCAL)) {
-			unsigned long power = 0;
-			unsigned long nr_running = 0;
-			unsigned long capacity;
-			int i;
-
-			for_each_cpu(i, sched_domain_span(tmp)) {
-				power += power_of(i);
-				nr_running += cpu_rq(i)->cfs.nr_running;
-			}
-
-			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-
-			if (nr_running < capacity)
-				want_sd = 0;
-		}
-
 		/*
 		 * If both cpu and prev_cpu are part of this domain,
 		 * cpu is a valid SD_WAKE_AFFINE target.
@@ -2731,21 +2709,15 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 			affine_sd = tmp;
-			want_affine = 0;
-		}
-
-		if (!want_sd && !want_affine)
 			break;
+		}
 
-		if (!(tmp->flags & sd_flag))
-			continue;
-
-		if (want_sd)
+		if (tmp->flags & sd_flag)
 			sd = tmp;
 	}
 
 	if (affine_sd) {
-		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
 			prev_cpu = cpu;
 
 		new_cpu = select_idle_sibling(p, prev_cpu);
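
A subtlety in the last hunk: the affine-path condition changes from
'cpu == prev_cpu || wake_affine(...)' to 'cpu != prev_cpu && wake_affine(...)'.
This is behavior-preserving, because when cpu == prev_cpu the old branch
assigned prev_cpu = cpu, a no-op. Below is a minimal standalone check of that
equivalence (ordinary userspace C, not kernel code; pick() and the plain
boolean standing in for the wake_affine() result are hypothetical helpers for
illustration only):

	#include <assert.h>
	#include <stdbool.h>

	/* Old and new forms of the affine_sd branch; returns the CPU
	 * that would be handed to select_idle_sibling(). */
	static int pick(int cpu, int prev_cpu, bool affine, bool old_style)
	{
		if (old_style) {
			if (cpu == prev_cpu || affine)	/* pre-patch */
				prev_cpu = cpu;
		} else {
			if (cpu != prev_cpu && affine)	/* post-patch */
				prev_cpu = cpu;
		}
		return prev_cpu;
	}

	int main(void)
	{
		/* Exhaustively compare both forms over all cases. */
		for (int cpu = 0; cpu < 2; cpu++)
			for (int prev = 0; prev < 2; prev++)
				for (int affine = 0; affine < 2; affine++)
					assert(pick(cpu, prev, affine, true) ==
					       pick(cpu, prev, affine, false));
		return 0;
	}

Both forms also short-circuit identically, so wake_affine() is invoked in
exactly the same cases before and after the patch.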