Commit 5d4dfddd authored by Nicolas Pitre, committed by Ingo Molnar

sched: Rename capacity related flags

It is better not to think about compute capacity as being equivalent
to "CPU power".  The upcoming "power aware" scheduler work may create
confusion with the notion of energy consumption if "power" is used too
liberally.

Let's rename the following feature flags since they do relate to capacity:

	SD_SHARE_CPUPOWER  -> SD_SHARE_CPUCAPACITY
	ARCH_POWER         -> ARCH_CAPACITY
	NONTASK_POWER      -> NONTASK_CAPACITY
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Andy Fleming <afleming@freescale.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: devicetree@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/n/tip-e93lpnxb87owfievqatey6b5@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ca8ce3d0
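
For illustration only (this sketch is not part of the commit): after the rename, an SMT-level sched-domain flags callback reports that the hardware threads of one core share a single compute capacity and the package resources, exactly as powerpc_smt_flags() and cpu_smt_flags() do in the hunks below. The function example_smt_flags() and the small main() wrapper are made up for this standalone sketch; the flag values are copied from the SD_* definitions in the second hunk.

#include <stdio.h>

/* Flag values as defined in the second hunk below. */
#define SD_SHARE_CPUCAPACITY	0x0080
#define SD_SHARE_PKG_RESOURCES	0x0200

/* What an SMT-level flags callback returns after the rename. */
static int example_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}

int main(void)
{
	printf("SMT domain flags: 0x%04x\n", example_smt_flags());
	return 0;
}
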
@@ -770,7 +770,7 @@ int setup_profiling_timer(unsigned int multiplier)
 /* cpumask of CPUs with asymetric SMT dependancy */
 static const int powerpc_smt_flags(void)
 {
-	int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
...
@@ -869,7 +869,7 @@ enum cpu_idle_type {
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
+#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
@@ -881,7 +881,7 @@ enum cpu_idle_type {
 #ifdef CONFIG_SCHED_SMT
 static inline const int cpu_smt_flags(void)
 {
-	return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
...
@@ -872,7 +872,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	rq->clock_task += delta;

 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		sched_rt_avg_update(rq, irq_delta + steal);
 #endif
 }
@@ -5309,7 +5309,7 @@ static int sd_degenerate(struct sched_domain *sd)
 			 SD_BALANCE_NEWIDLE |
 			 SD_BALANCE_FORK |
 			 SD_BALANCE_EXEC |
-			 SD_SHARE_CPUPOWER |
+			 SD_SHARE_CPUCAPACITY |
 			 SD_SHARE_PKG_RESOURCES |
 			 SD_SHARE_POWERDOMAIN)) {
 		if (sd->groups != sd->groups->next)
@@ -5340,7 +5340,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_NEWIDLE |
 				SD_BALANCE_FORK |
 				SD_BALANCE_EXEC |
-				SD_SHARE_CPUPOWER |
+				SD_SHARE_CPUCAPACITY |
 				SD_SHARE_PKG_RESOURCES |
 				SD_PREFER_SIBLING |
 				SD_SHARE_POWERDOMAIN);
@@ -5947,7 +5947,7 @@ static int sched_domains_curr_level;
 /*
  * SD_flags allowed in topology descriptions.
  *
- * SD_SHARE_CPUPOWER      - describes SMT topologies
+ * SD_SHARE_CPUCAPACITY   - describes SMT topologies
  * SD_SHARE_PKG_RESOURCES - describes shared caches
  * SD_NUMA                - describes NUMA topologies
  * SD_SHARE_POWERDOMAIN   - describes shared power domain
@@ -5956,7 +5956,7 @@ static int sched_domains_curr_level;
  * SD_ASYM_PACKING        - describes SMT quirks
  */
 #define TOPOLOGY_SD_FLAGS		\
-	(SD_SHARE_CPUPOWER |		\
+	(SD_SHARE_CPUCAPACITY |		\
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA |			\
 	 SD_ASYM_PACKING |		\
@@ -6002,7 +6002,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 					| 1*SD_BALANCE_FORK
 					| 0*SD_BALANCE_WAKE
 					| 1*SD_WAKE_AFFINE
-					| 0*SD_SHARE_CPUPOWER
+					| 0*SD_SHARE_CPUCAPACITY
 					| 0*SD_SHARE_PKG_RESOURCES
 					| 0*SD_SERIALIZE
 					| 0*SD_PREFER_SIBLING
@@ -6024,7 +6024,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 	 * Convert topological properties into behaviour.
 	 */

-	if (sd->flags & SD_SHARE_CPUPOWER) {
+	if (sd->flags & SD_SHARE_CPUCAPACITY) {
 		sd->imbalance_pct = 110;
 		sd->smt_gain = 1178;	/* ~15% */
...
@@ -5672,8 +5672,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = SCHED_CAPACITY_SCALE;
 	struct sched_group *sdg = sd->groups;

-	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-		if (sched_feat(ARCH_POWER))
+	if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
+		if (sched_feat(ARCH_CAPACITY))
 			capacity *= arch_scale_smt_capacity(sd, cpu);
 		else
 			capacity *= default_scale_smt_capacity(sd, cpu);
@@ -5683,7 +5683,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	sdg->sgc->capacity_orig = capacity;

-	if (sched_feat(ARCH_POWER))
+	if (sched_feat(ARCH_CAPACITY))
 		capacity *= arch_scale_freq_capacity(sd, cpu);
 	else
 		capacity *= default_scale_capacity(sd, cpu);
@@ -5782,7 +5782,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 	/*
 	 * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
 	 */
-	if (!(sd->flags & SD_SHARE_CPUPOWER))
+	if (!(sd->flags & SD_SHARE_CPUCAPACITY))
 		return 0;

 	/*
...
@@ -37,18 +37,18 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
 SCHED_FEAT(WAKEUP_PREEMPTION, true)

 /*
- * Use arch dependent cpu power functions
+ * Use arch dependent cpu capacity functions
  */
-SCHED_FEAT(ARCH_POWER, true)
+SCHED_FEAT(ARCH_CAPACITY, true)

 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)

 /*
- * Decrement CPU power based on time not spent running tasks
+ * Decrement CPU capacity based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, true)
+SCHED_FEAT(NONTASK_CAPACITY, true)

 /*
  * Queue remote wakeups on the target CPU and process them
...
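
For readers unfamiliar with the feature-flag machinery renamed above, here is a simplified, self-contained sketch (an assumption-laden model that only loosely mirrors the X-macro pattern of the kernel's sched features file, not the kernel source itself) of how SCHED_FEAT() entries such as ARCH_CAPACITY and NONTASK_CAPACITY become bits that sched_feat() can test. The SCHED_FEATURES list, helper macros, and variable names below are invented for this sketch.

#include <stdio.h>

/* One F(name, default) entry per feature, mimicking SCHED_FEAT() lines. */
#define SCHED_FEATURES(F)		\
	F(ARCH_CAPACITY, 1)		\
	F(NONTASK_CAPACITY, 1)

/* First expansion: one enum constant per feature. */
#define SCHED_FEAT_ENUM(name, enabled)	__SCHED_FEAT_##name,
enum { SCHED_FEATURES(SCHED_FEAT_ENUM) __SCHED_FEAT_NR };

/* Second expansion: fold the default-enabled features into a bitmask. */
#define SCHED_FEAT_BIT(name, enabled)	((enabled) << __SCHED_FEAT_##name) |
static const unsigned int sched_features = SCHED_FEATURES(SCHED_FEAT_BIT) 0;

/* Test one bit, the way sched_feat(NONTASK_CAPACITY) is used in the hunks above. */
#define sched_feat(name)	(sched_features & (1U << __SCHED_FEAT_##name))

int main(void)
{
	if (sched_feat(NONTASK_CAPACITY))
		printf("NONTASK_CAPACITY enabled by default\n");
	return 0;
}
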