Commit a2e7f03e authored by Qais Yousef, committed by Peter Zijlstra

sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()

Use the new util_fits_cpu() to ensure the migration margin and capacity
pressure are taken into account correctly when uclamp is being used;
otherwise we fail to consider CPUs as fitting in scenarios where they
should.

s/asym_fits_capacity/asym_fits_cpu/ to better reflect what it does now.

Fixes: b4c9c9f1 ("sched/fair: Prefer prev cpu in asymmetric wakeup path")
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220804143609.515789-6-qais.yousef@arm.com
parent b759caa1
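
For readers unfamiliar with the helper being wired in below: the following is a minimal, self-contained userspace sketch of the idea behind util_fits_cpu(), written for this annotation and not taken from the kernel. It models only the 1.25 migration margin of fits_capacity() and the uclamp min/max hints; the real helper also accounts for thermal pressure and related corner cases. The _sketch suffix marks the hypothetical function.

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

/* Kernel-style migration margin: util must stay ~20% below capacity. */
static bool fits_capacity(unsigned long util, unsigned long capacity)
{
	return util * 1280 < capacity * 1024;
}

/*
 * Simplified stand-in for util_fits_cpu(); @capacity plays the role of
 * capacity_of(cpu). Thermal pressure is deliberately ignored here.
 */
static bool util_fits_cpu_sketch(unsigned long util,
				 unsigned long util_min,
				 unsigned long util_max,
				 unsigned long capacity)
{
	bool fits = fits_capacity(util, capacity);

	/*
	 * UCLAMP_MAX caps the performance request: if the cap itself
	 * fits the CPU, the task fits even when its raw util would
	 * trip the migration margin. A default max hint
	 * (SCHED_CAPACITY_SCALE) on the biggest CPU means "no cap
	 * requested", so fall back to the margin check there.
	 */
	if (util_max <= capacity &&
	    !(util_max == SCHED_CAPACITY_SCALE &&
	      capacity == SCHED_CAPACITY_SCALE))
		fits = true;

	/*
	 * UCLAMP_MIN is a performance floor: a CPU that cannot deliver
	 * the requested minimum does not fit, regardless of raw util.
	 */
	if (util_min > capacity)
		fits = false;

	return fits;
}

The design point the patch relies on is visible above: the uclamp hints are compared against capacity without the migration margin, while raw utilization keeps it.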
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6807,10 +6807,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 	return best_cpu;
 }
 
-static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
+static inline bool asym_fits_cpu(unsigned long util,
+				 unsigned long util_min,
+				 unsigned long util_max,
+				 int cpu)
 {
 	if (sched_asym_cpucap_active())
-		return fits_capacity(task_util, capacity_of(cpu));
+		return util_fits_cpu(util, util_min, util_max, cpu);
 
 	return true;
 }
@@ -6822,7 +6825,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	bool has_idle_core = false;
 	struct sched_domain *sd;
-	unsigned long task_util;
+	unsigned long task_util, util_min, util_max;
 	int i, recent_used_cpu;
 
 	/*
@@ -6831,7 +6834,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 */
 	if (sched_asym_cpucap_active()) {
 		sync_entity_load_avg(&p->se);
-		task_util = uclamp_task_util(p);
+		task_util = task_util_est(p);
+		util_min = uclamp_eff_value(p, UCLAMP_MIN);
+		util_max = uclamp_eff_value(p, UCLAMP_MAX);
 	}
 
 	/*
@@ -6840,7 +6845,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	lockdep_assert_irqs_disabled();
 
 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
-	    asym_fits_capacity(task_util, target))
+	    asym_fits_cpu(task_util, util_min, util_max, target))
 		return target;
 
 	/*
@@ -6848,7 +6853,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 */
 	if (prev != target && cpus_share_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
-	    asym_fits_capacity(task_util, prev))
+	    asym_fits_cpu(task_util, util_min, util_max, prev))
 		return prev;
 
 	/*
@@ -6863,7 +6868,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    in_task() &&
 	    prev == smp_processor_id() &&
 	    this_rq()->nr_running <= 1 &&
-	    asym_fits_capacity(task_util, prev)) {
+	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
 		return prev;
 	}
 
@@ -6875,7 +6880,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
-	    asym_fits_capacity(task_util, recent_used_cpu)) {
+	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
 		return recent_used_cpu;
 	}
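
Appending this hypothetical main() to the sketch above gives a compilable demonstration of the scenario the changelog describes: under the old clamped-util check, a task capped with UCLAMP_MAX still failed the 1.25 margin on a little CPU barely bigger than its cap, whereas the uclamp-aware check accepts it. The capacity values are made up.

int main(void)
{
	/* Hypothetical asymmetric capacities: a little and a big CPU. */
	const unsigned long little = 446, big = 1024;

	/* A busy task: raw util 600, capped with UCLAMP_MAX = 400. */
	unsigned long util = 600, umin = 0, umax = 400;

	/*
	 * Old behaviour: uclamp_task_util() clamped the util, but the
	 * clamped value (400) was still judged with the 1.25 margin,
	 * so the capped task could not fit a CPU of capacity 446.
	 */
	unsigned long clamped = util;
	if (clamped > umax)
		clamped = umax;
	if (clamped < umin)
		clamped = umin;
	printf("old check, little CPU: %d\n", fits_capacity(clamped, little));	/* 0 */

	/* New behaviour: the UCLAMP_MAX hint fits, so the CPU fits. */
	printf("new check, little CPU: %d\n",
	       util_fits_cpu_sketch(util, umin, umax, little));		/* 1 */

	/* A boosted light task (UCLAMP_MIN = 900) only fits the big CPU. */
	printf("boosted, little CPU:   %d\n",
	       util_fits_cpu_sketch(100, 900, SCHED_CAPACITY_SCALE, little));	/* 0 */
	printf("boosted, big CPU:      %d\n",
	       util_fits_cpu_sketch(100, 900, SCHED_CAPACITY_SCALE, big));	/* 1 */

	return 0;
}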