Commit bf57ae21 authored by Linus Torvalds

Merge tag 'sched-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:

 - Implement persistent user-requested affinity: introduce
   affinity_context::user_mask and unconditionally preserve the
   user-requested CPU affinity mask, so that long-lived tasks interact
   better with cpusets and CPU hotplug events over longer timespans,
   without losing the original affinity intent when the underlying
   topology changes (a usage sketch follows this list).

 - Uclamp updates: fix relationship between uclamp and fits_capacity()

 - PSI fixes

 - Misc fixes & updates
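
The affinity_context structure behind the first item is introduced in
kernel/sched/sched.h (see the diff further down). The sketch below is a rough,
non-authoritative illustration of how an in-kernel caller might bundle its
arguments with it, assuming the reworked __set_cpus_allowed_ptr() signature
from this series; the do_set_affinity() wrapper itself is hypothetical:

/*
 * Minimal sketch (hypothetical wrapper, not kernel code as-is): pass one
 * affinity_context instead of threading (mask, flags) pairs through every
 * layer of the affinity-setting path.
 */
static int do_set_affinity(struct task_struct *p, const struct cpumask *mask)
{
        struct affinity_context ac = {
                .new_mask  = mask,      /* mask to apply now */
                .user_mask = NULL,      /* leave the stored user request alone */
                .flags     = 0,         /* e.g. SCA_USER for a user request */
        };

        return __set_cpus_allowed_ptr(p, &ac);
}

Per-class hooks such as set_cpus_allowed_dl() receive the same context
pointer, as the kernel/sched/deadline.c hunk below shows.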

* tag 'sched-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Clear ttwu_pending after enqueue_task()
  sched/psi: Use task->psi_flags to clear in CPU migration
  sched/psi: Stop relying on timer_pending() for poll_work rescheduling
  sched/psi: Fix avgs_work re-arm in psi_avgs_work()
  sched/psi: Fix possible missing or delayed pending event
  sched: Always clear user_cpus_ptr in do_set_cpus_allowed()
  sched: Enforce user requested affinity
  sched: Always preserve the user requested cpumask
  sched: Introduce affinity_context
  sched: Add __releases annotations to affine_move_task()
  sched/fair: Check if prev_cpu has highest spare cap in feec()
  sched/fair: Consider capacity inversion in util_fits_cpu()
  sched/fair: Detect capacity inversion
  sched/uclamp: Cater for uclamp in find_energy_efficient_cpu()'s early exit condition
  sched/uclamp: Make cpu_overutilized() use util_fits_cpu()
  sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()
  sched/uclamp: Make select_idle_capacity() use util_fits_cpu()
  sched/uclamp: Fix fits_capacity() check in feec()
  sched/uclamp: Make task_fits_capacity() use util_fits_cpu()
  sched/uclamp: Fix relationship between uclamp and migration margin
parents add76959 d6962c4f

include/linux/psi_types.h

@@ -72,6 +72,9 @@ enum psi_states {
 /* Use one bit in the state mask to track TSK_ONCPU */
 #define PSI_ONCPU (1 << NR_PSI_STATES)
 
+/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
+#define PSI_STATE_RESCHEDULE (1 << (NR_PSI_STATES + 1))
+
 enum psi_aggregators {
         PSI_AVGS = 0,
         PSI_POLL,
@@ -177,6 +180,7 @@ struct psi_group {
         struct timer_list poll_timer;
         wait_queue_head_t poll_wait;
         atomic_t poll_wakeup;
+        atomic_t poll_scheduled;
         /* Protects data used by the monitor */
         struct mutex trigger_lock;

include/linux/sched.h

@@ -888,9 +888,6 @@ struct task_struct {
         unsigned sched_reset_on_fork:1;
         unsigned sched_contributes_to_load:1;
         unsigned sched_migrated:1;
-#ifdef CONFIG_PSI
-        unsigned sched_psi_wake_requeue:1;
-#endif
         /* Force alignment to the next boundary: */
         unsigned :0;
This file's diff is collapsed and not shown.

kernel/sched/deadline.c

@@ -2485,8 +2485,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-                                const struct cpumask *new_mask,
-                                u32 flags)
+                                struct affinity_context *ctx)
 {
         struct root_domain *src_rd;
         struct rq *rq;
@@ -2501,7 +2500,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
          * update. We already made space for us in the destination
          * domain (see cpuset_can_attach()).
          */
-        if (!cpumask_intersects(src_rd->span, new_mask)) {
+        if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
                 struct dl_bw *src_dl_b;
 
                 src_dl_b = dl_bw_of(cpu_of(rq));
@@ -2515,7 +2514,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
                 raw_spin_unlock(&src_dl_b->lock);
         }
 
-        set_cpus_allowed_common(p, new_mask, flags);
+        set_cpus_allowed_common(p, ctx);
 }
 
 /* Assumes rq->lock is held */
This file's diff is collapsed and not shown.

kernel/sched/psi.c

@@ -189,6 +189,7 @@ static void group_init(struct psi_group *group)
         INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
         mutex_init(&group->avgs_lock);
         /* Init trigger-related members */
+        atomic_set(&group->poll_scheduled, 0);
         mutex_init(&group->trigger_lock);
         INIT_LIST_HEAD(&group->triggers);
         group->poll_min_period = U32_MAX;
@@ -242,6 +243,8 @@ static void get_recent_times(struct psi_group *group, int cpu,
                              u32 *pchanged_states)
 {
         struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
+        int current_cpu = raw_smp_processor_id();
+        unsigned int tasks[NR_PSI_TASK_COUNTS];
         u64 now, state_start;
         enum psi_states s;
         unsigned int seq;
@@ -256,6 +259,8 @@ static void get_recent_times(struct psi_group *group, int cpu,
                 memcpy(times, groupc->times, sizeof(groupc->times));
                 state_mask = groupc->state_mask;
                 state_start = groupc->state_start;
+                if (cpu == current_cpu)
+                        memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
         } while (read_seqcount_retry(&groupc->seq, seq));
 
         /* Calculate state time deltas against the previous snapshot */
@@ -280,6 +285,28 @@ static void get_recent_times(struct psi_group *group, int cpu,
                 if (delta)
                         *pchanged_states |= (1 << s);
         }
+
+        /*
+         * When collect_percpu_times() from the avgs_work, we don't want to
+         * re-arm avgs_work when all CPUs are IDLE. But the current CPU running
+         * this avgs_work is never IDLE, cause avgs_work can't be shut off.
+         * So for the current CPU, we need to re-arm avgs_work only when
+         * (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0), for other CPUs
+         * we can just check PSI_NONIDLE delta.
+         */
+        if (current_work() == &group->avgs_work.work) {
+                bool reschedule;
+
+                if (cpu == current_cpu)
+                        reschedule = tasks[NR_RUNNING] +
+                                     tasks[NR_IOWAIT] +
+                                     tasks[NR_MEMSTALL] > 1;
+                else
+                        reschedule = *pchanged_states & (1 << PSI_NONIDLE);
+
+                if (reschedule)
+                        *pchanged_states |= PSI_STATE_RESCHEDULE;
+        }
 }
 
 static void calc_avgs(unsigned long avg[3], int missed_periods,
@@ -415,7 +442,6 @@ static void psi_avgs_work(struct work_struct *work)
         struct delayed_work *dwork;
         struct psi_group *group;
         u32 changed_states;
-        bool nonidle;
         u64 now;
 
         dwork = to_delayed_work(work);
@@ -426,7 +452,6 @@ static void psi_avgs_work(struct work_struct *work)
         now = sched_clock();
 
         collect_percpu_times(group, PSI_AVGS, &changed_states);
-        nonidle = changed_states & (1 << PSI_NONIDLE);
         /*
          * If there is task activity, periodically fold the per-cpu
          * times and feed samples into the running averages. If things
@@ -437,7 +462,7 @@ static void psi_avgs_work(struct work_struct *work)
         if (now >= group->avg_next_update)
                 group->avg_next_update = update_averages(group, now);
 
-        if (nonidle) {
+        if (changed_states & PSI_STATE_RESCHEDULE) {
                 schedule_delayed_work(dwork, nsecs_to_jiffies(
                                 group->avg_next_update - now) + 1);
         }
@@ -539,11 +564,13 @@ static u64 update_triggers(struct psi_group *group, u64 now)
                         /* Calculate growth since last update */
                         growth = window_update(&t->win, now, total[t->state]);
-                        if (growth < t->threshold)
-                                continue;
-
-                        t->pending_event = true;
+                        if (!t->pending_event) {
+                                if (growth < t->threshold)
+                                        continue;
+
+                                t->pending_event = true;
+                        }
                 }
                 /* Limit event signaling to once per window */
                 if (now < t->last_event_time + t->win.size)
                         continue;
@@ -563,18 +590,17 @@ static u64 update_triggers(struct psi_group *group, u64 now)
         return now + group->poll_min_period;
 }
 
-/* Schedule polling if it's not already scheduled. */
-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+/* Schedule polling if it's not already scheduled or forced. */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
+                                   bool force)
 {
         struct task_struct *task;
 
         /*
-         * Do not reschedule if already scheduled.
-         * Possible race with a timer scheduled after this check but before
-         * mod_timer below can be tolerated because group->polling_next_update
-         * will keep updates on schedule.
+         * atomic_xchg should be called even when !force to provide a
+         * full memory barrier (see the comment inside psi_poll_work).
          */
-        if (timer_pending(&group->poll_timer))
+        if (atomic_xchg(&group->poll_scheduled, 1) && !force)
                 return;
 
         rcu_read_lock();
@@ -586,12 +612,15 @@ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
          */
         if (likely(task))
                 mod_timer(&group->poll_timer, jiffies + delay);
+        else
+                atomic_set(&group->poll_scheduled, 0);
 
         rcu_read_unlock();
 }
 
 static void psi_poll_work(struct psi_group *group)
 {
+        bool force_reschedule = false;
         u32 changed_states;
         u64 now;
 
@@ -599,6 +628,43 @@ static void psi_poll_work(struct psi_group *group)
         now = sched_clock();
 
+        if (now > group->polling_until) {
+                /*
+                 * We are either about to start or might stop polling if no
+                 * state change was recorded. Resetting poll_scheduled leaves
+                 * a small window for psi_group_change to sneak in and schedule
+                 * an immediate poll_work before we get to rescheduling. One
+                 * potential extra wakeup at the end of the polling window
+                 * should be negligible and polling_next_update still keeps
+                 * updates correctly on schedule.
+                 */
+                atomic_set(&group->poll_scheduled, 0);
+                /*
+                 * A task change can race with the poll worker that is supposed to
+                 * report on it. To avoid missing events, ensure ordering between
+                 * poll_scheduled and the task state accesses, such that if the poll
+                 * worker misses the state update, the task change is guaranteed to
+                 * reschedule the poll worker:
+                 *
+                 * poll worker:
+                 *   atomic_set(poll_scheduled, 0)
+                 *   smp_mb()
+                 *   LOAD states
+                 *
+                 * task change:
+                 *   STORE states
+                 *   if atomic_xchg(poll_scheduled, 1) == 0:
+                 *     schedule poll worker
+                 *
+                 * The atomic_xchg() implies a full barrier.
+                 */
+                smp_mb();
+        } else {
+                /* Polling window is not over, keep rescheduling */
+                force_reschedule = true;
+        }
+
         collect_percpu_times(group, PSI_POLL, &changed_states);
 
         if (changed_states & group->poll_states) {
@@ -624,7 +690,8 @@ static void psi_poll_work(struct psi_group *group)
                 group->polling_next_update = update_triggers(group, now);
 
         psi_schedule_poll_work(group,
-                nsecs_to_jiffies(group->polling_next_update - now) + 1);
+                nsecs_to_jiffies(group->polling_next_update - now) + 1,
+                force_reschedule);
 
 out:
         mutex_unlock(&group->trigger_lock);
@@ -785,7 +852,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
         write_seqcount_end(&groupc->seq);
 
         if (state_mask & group->poll_states)
-                psi_schedule_poll_work(group, 1);
+                psi_schedule_poll_work(group, 1, false);
 
         if (wake_clock && !delayed_work_pending(&group->avgs_work))
                 schedule_delayed_work(&group->avgs_work, PSI_FREQ);
@@ -939,7 +1006,7 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
         write_seqcount_end(&groupc->seq);
 
         if (group->poll_states & (1 << PSI_IRQ_FULL))
-                psi_schedule_poll_work(group, 1);
+                psi_schedule_poll_work(group, 1, false);
 } while ((group = group->parent));
 }
 #endif
@@ -1325,6 +1392,7 @@ void psi_trigger_destroy(struct psi_trigger *t)
                  * can no longer be found through group->poll_task.
                  */
                 kthread_stop(task_to_destroy);
+                atomic_set(&group->poll_scheduled, 0);
         }
         kfree(t);
 }
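
The ordering scheme documented in the psi_poll_work() comment above can be
modelled outside the kernel. The sketch below is illustrative only: it uses
userspace C11 atomics as stand-ins for atomic_xchg()/atomic_set()/smp_mb(),
and the function and variable names are made up rather than kernel APIs:

/*
 * Userspace model of the poll_scheduled handshake described in
 * psi_poll_work(). C11 seq_cst atomics stand in for the kernel primitives;
 * task_states is a simplified stand-in for the per-CPU task counts.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int poll_scheduled;
static atomic_int task_states;

/* Task-change side: publish the new state, then try to claim the slot. */
static bool task_change_should_schedule(int new_states)
{
        atomic_store(&task_states, new_states);          /* STORE states */
        /* seq_cst exchange plays the role of atomic_xchg()'s full barrier */
        return atomic_exchange(&poll_scheduled, 1) == 0;
}

/* Poll-worker side: release the slot, then re-read the state. */
static int poll_worker_snapshot(void)
{
        atomic_store(&poll_scheduled, 0);                /* atomic_set() */
        atomic_thread_fence(memory_order_seq_cst);       /* smp_mb()     */
        return atomic_load(&task_states);                /* LOAD states  */
}

With both sides fully ordered, either the worker's load sees the freshly
stored state, or the task change's exchange finds poll_scheduled already
cleared and schedules the worker again, so a state update cannot be missed
by both.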

kernel/sched/sched.h

@@ -1041,6 +1041,7 @@ struct rq {
         unsigned long cpu_capacity;
         unsigned long cpu_capacity_orig;
+        unsigned long cpu_capacity_inverted;
 
         struct balance_callback *balance_callback;
@@ -1150,6 +1151,9 @@ struct rq {
         unsigned int core_forceidle_occupation;
         u64 core_forceidle_start;
 #endif
+
+        /* Scratch cpumask to be temporarily used under rq_lock */
+        cpumask_var_t scratch_mask;
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1877,6 +1881,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 #endif
 
 extern int sched_update_scaling(void);
+
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+        if (!p->user_cpus_ptr)
+                return cpu_possible_mask; /* &init_task.cpus_mask */
+        return p->user_cpus_ptr;
+}
+
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -2144,6 +2155,12 @@ extern const u32 sched_prio_to_wmult[40];
 #define RETRY_TASK ((void *)-1UL)
 
+struct affinity_context {
+        const struct cpumask *new_mask;
+        struct cpumask *user_mask;
+        unsigned int flags;
+};
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -2172,9 +2189,7 @@ struct sched_class {
         void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
-        void (*set_cpus_allowed)(struct task_struct *p,
-                                 const struct cpumask *newmask,
-                                 u32 flags);
+        void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
 
         void (*rq_online)(struct rq *rq);
         void (*rq_offline)(struct rq *rq);
@@ -2285,7 +2300,7 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
 
 static inline struct task_struct *get_push_task(struct rq *rq)
 {
@@ -2878,6 +2893,24 @@ static inline unsigned long capacity_orig_of(int cpu)
         return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
+/*
+ * Returns inverted capacity if the CPU is in capacity inversion state.
+ * 0 otherwise.
+ *
+ * Capacity inversion detection only considers thermal impact where actual
+ * performance points (OPPs) gets dropped.
+ *
+ * Capacity inversion state happens when another performance domain that has
+ * equal or lower capacity_orig_of() becomes effectively larger than the perf
+ * domain this CPU belongs to due to thermal pressure throttling it hard.
+ *
+ * See comment in update_cpu_capacity().
+ */
+static inline unsigned long cpu_in_capacity_inversion(int cpu)
+{
+        return cpu_rq(cpu)->cpu_capacity_inverted;
+}
+
 /**
  * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL: Utilization used to select frequency
@@ -2979,6 +3012,23 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 #ifdef CONFIG_UCLAMP_TASK
 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
 
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+                                          enum uclamp_id clamp_id)
+{
+        return READ_ONCE(rq->uclamp[clamp_id].value);
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+                                 unsigned int value)
+{
+        WRITE_ONCE(rq->uclamp[clamp_id].value, value);
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+        return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
+}
+
 /**
  * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
  * @rq: The rq to clamp against. Must not be NULL.
@@ -3014,12 +3064,12 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
                  * Ignore last runnable task's max clamp, as this task will
                  * reset it. Similarly, no need to read the rq's min clamp.
                  */
-                if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+                if (uclamp_rq_is_idle(rq))
                         goto out;
         }
 
-        min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
-        max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
+        min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
+        max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
 out:
         /*
          * Since CPU's {min,max}_util clamps are MAX aggregated considering
@@ -3060,6 +3110,15 @@ static inline bool uclamp_is_used(void)
         return static_branch_likely(&sched_uclamp_used);
 }
 #else /* CONFIG_UCLAMP_TASK */
+
+static inline unsigned long uclamp_eff_value(struct task_struct *p,
+                                             enum uclamp_id clamp_id)
+{
+        if (clamp_id == UCLAMP_MIN)
+                return 0;
+
+        return SCHED_CAPACITY_SCALE;
+}
+
 static inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
                                   struct task_struct *p)
@@ -3073,6 +3132,25 @@ static inline bool uclamp_is_used(void)
 {
         return false;
 }
+
+static inline unsigned long uclamp_rq_get(struct rq *rq,
+                                          enum uclamp_id clamp_id)
+{
+        if (clamp_id == UCLAMP_MIN)
+                return 0;
+
+        return SCHED_CAPACITY_SCALE;
+}
+
+static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
+                                 unsigned int value)
+{
+}
+
+static inline bool uclamp_rq_is_idle(struct rq *rq)
+{
+        return false;
+}
+
 #endif /* CONFIG_UCLAMP_TASK */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
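
cpu_in_capacity_inversion() above is documented to return the thermally
reduced capacity when a CPU is in inversion and 0 otherwise. The helper below
is a hypothetical consumer that only illustrates that contract; it is not the
kernel's util_fits_cpu(), which lives in kernel/sched/fair.c and also folds in
the uclamp constraints and the fitting margin:

/*
 * Hypothetical consumer of cpu_in_capacity_inversion(), for illustration
 * only; the real decision logic is util_fits_cpu() in kernel/sched/fair.c.
 */
static inline bool cpu_capacity_fits(int cpu, unsigned long util)
{
        unsigned long capacity = cpu_in_capacity_inversion(cpu);

        /* A zero return means no inversion: use the original capacity. */
        if (!capacity)
                capacity = capacity_orig_of(cpu);

        /* The kernel would additionally apply a margin (fits_capacity()). */
        return util <= capacity;
}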

kernel/sched/stats.h

@@ -128,11 +128,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
         if (p->in_memstall)
                 set |= TSK_MEMSTALL_RUNNING;
 
-        if (!wakeup || p->sched_psi_wake_requeue) {
+        if (!wakeup) {
                 if (p->in_memstall)
                         set |= TSK_MEMSTALL;
-                if (p->sched_psi_wake_requeue)
-                        p->sched_psi_wake_requeue = 0;
         } else {
                 if (p->in_iowait)
                         clear |= TSK_IOWAIT;
@@ -143,8 +141,6 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 
 static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
-        int clear = TSK_RUNNING;
-
         if (static_branch_likely(&psi_disabled))
                 return;
 
@@ -157,10 +153,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
         if (sleep)
                 return;
 
-        if (p->in_memstall)
-                clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
-
-        psi_task_change(p, clear, 0);
+        psi_task_change(p, p->psi_flags, 0);
 }
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)
@@ -172,19 +165,12 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
          * deregister its sleep-persistent psi states from the old
          * queue, and let psi_enqueue() know it has to requeue.
          */
-        if (unlikely(p->in_iowait || p->in_memstall)) {
+        if (unlikely(p->psi_flags)) {
                 struct rq_flags rf;
                 struct rq *rq;
-                int clear = 0;
-
-                if (p->in_iowait)
-                        clear |= TSK_IOWAIT;
-                if (p->in_memstall)
-                        clear |= TSK_MEMSTALL;
 
                 rq = __task_rq_lock(p, &rf);
-                psi_task_change(p, clear, 0);
-                p->sched_psi_wake_requeue = 1;
+                psi_task_change(p, p->psi_flags, 0);
                 __task_rq_unlock(rq, &rf);
         }
 }