Commit 7fae6c81 authored by Chengming Zhou's avatar Chengming Zhou Committed by Ingo Molnar

psi: Use ONCPU state tracking machinery to detect reclaim

Move the reclaim detection from the timer tick to the task state
tracking machinery using the recently added ONCPU state. And we
also add task psi_flags changes checking in the psi_task_switch()
optimization to update the parents properly.

In terms of performance and cost, this ONCPU task state tracking
is not cheaper than previous timer tick in aggregate. But the code is
simpler and shorter this way, so it's a maintainability win. And
Johannes did some testing with perf bench; the performance and cost
changes would be acceptable for real workloads.

Thanks to Johannes Weiner for pointing out the psi_task_switch()
optimization things and the clearer changelog.
Co-developed-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lkml.kernel.org/r/20210303034659.91735-3-zhouchengming@bytedance.com
parent e7fcd762
...@@ -20,7 +20,6 @@ void psi_task_change(struct task_struct *task, int clear, int set); ...@@ -20,7 +20,6 @@ void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next, void psi_task_switch(struct task_struct *prev, struct task_struct *next,
bool sleep); bool sleep);
void psi_memstall_tick(struct task_struct *task, int cpu);
void psi_memstall_enter(unsigned long *flags); void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags); void psi_memstall_leave(unsigned long *flags);
......
...@@ -4551,7 +4551,6 @@ void scheduler_tick(void) ...@@ -4551,7 +4551,6 @@ void scheduler_tick(void)
update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
curr->sched_class->task_tick(rq, curr, 0); curr->sched_class->task_tick(rq, curr, 0);
calc_global_load_tick(rq); calc_global_load_tick(rq);
psi_task_tick(rq);
rq_unlock(rq, &rf); rq_unlock(rq, &rf);
......
...@@ -644,8 +644,7 @@ static void poll_timer_fn(struct timer_list *t) ...@@ -644,8 +644,7 @@ static void poll_timer_fn(struct timer_list *t)
wake_up_interruptible(&group->poll_wait); wake_up_interruptible(&group->poll_wait);
} }
static void record_times(struct psi_group_cpu *groupc, int cpu, static void record_times(struct psi_group_cpu *groupc, int cpu)
bool memstall_tick)
{ {
u32 delta; u32 delta;
u64 now; u64 now;
...@@ -664,23 +663,6 @@ static void record_times(struct psi_group_cpu *groupc, int cpu, ...@@ -664,23 +663,6 @@ static void record_times(struct psi_group_cpu *groupc, int cpu,
groupc->times[PSI_MEM_SOME] += delta; groupc->times[PSI_MEM_SOME] += delta;
if (groupc->state_mask & (1 << PSI_MEM_FULL)) if (groupc->state_mask & (1 << PSI_MEM_FULL))
groupc->times[PSI_MEM_FULL] += delta; groupc->times[PSI_MEM_FULL] += delta;
else if (memstall_tick) {
u32 sample;
/*
* Since we care about lost potential, a
* memstall is FULL when there are no other
* working tasks, but also when the CPU is
* actively reclaiming and nothing productive
* could run even if it were runnable.
*
* When the timer tick sees a reclaiming CPU,
* regardless of runnable tasks, sample a FULL
* tick (or less if it hasn't been a full tick
* since the last state change).
*/
sample = min(delta, (u32)jiffies_to_nsecs(1));
groupc->times[PSI_MEM_FULL] += sample;
}
} }
if (groupc->state_mask & (1 << PSI_CPU_SOME)) { if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
...@@ -714,7 +696,7 @@ static void psi_group_change(struct psi_group *group, int cpu, ...@@ -714,7 +696,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
*/ */
write_seqcount_begin(&groupc->seq); write_seqcount_begin(&groupc->seq);
record_times(groupc, cpu, false); record_times(groupc, cpu);
for (t = 0, m = clear; m; m &= ~(1 << t), t++) { for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
if (!(m & (1 << t))) if (!(m & (1 << t)))
...@@ -738,6 +720,18 @@ static void psi_group_change(struct psi_group *group, int cpu, ...@@ -738,6 +720,18 @@ static void psi_group_change(struct psi_group *group, int cpu,
if (test_state(groupc->tasks, s)) if (test_state(groupc->tasks, s))
state_mask |= (1 << s); state_mask |= (1 << s);
} }
/*
* Since we care about lost potential, a memstall is FULL
* when there are no other working tasks, but also when
* the CPU is actively reclaiming and nothing productive
* could run even if it were runnable. So when the current
* task in a cgroup is in_memstall, the corresponding groupc
* on that cpu is in PSI_MEM_FULL state.
*/
if (groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall)
state_mask |= (1 << PSI_MEM_FULL);
groupc->state_mask = state_mask; groupc->state_mask = state_mask;
write_seqcount_end(&groupc->seq); write_seqcount_end(&groupc->seq);
...@@ -823,17 +817,21 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next, ...@@ -823,17 +817,21 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
void *iter; void *iter;
if (next->pid) { if (next->pid) {
bool identical_state;
psi_flags_change(next, 0, TSK_ONCPU); psi_flags_change(next, 0, TSK_ONCPU);
/* /*
* When moving state between tasks, the group that * When switching between tasks that have an identical
* contains them both does not change: we can stop * runtime state, the cgroup that contains both tasks
* updating the tree once we reach the first common * runtime state, the cgroup that contains both tasks
* ancestor. Iterate @next's ancestors until we * we reach the first common ancestor. Iterate @next's
* encounter @prev's state. * ancestors only until we encounter @prev's ONCPU.
*/ */
identical_state = prev->psi_flags == next->psi_flags;
iter = NULL; iter = NULL;
while ((group = iterate_groups(next, &iter))) { while ((group = iterate_groups(next, &iter))) {
if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) { if (identical_state &&
per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
common = group; common = group;
break; break;
} }
...@@ -859,21 +857,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next, ...@@ -859,21 +857,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
} }
} }
void psi_memstall_tick(struct task_struct *task, int cpu)
{
struct psi_group *group;
void *iter = NULL;
while ((group = iterate_groups(task, &iter))) {
struct psi_group_cpu *groupc;
groupc = per_cpu_ptr(group->pcpu, cpu);
write_seqcount_begin(&groupc->seq);
record_times(groupc, cpu, true);
write_seqcount_end(&groupc->seq);
}
}
/** /**
* psi_memstall_enter - mark the beginning of a memory stall section * psi_memstall_enter - mark the beginning of a memory stall section
* @flags: flags to handle nested sections * @flags: flags to handle nested sections
......
...@@ -144,14 +144,6 @@ static inline void psi_sched_switch(struct task_struct *prev, ...@@ -144,14 +144,6 @@ static inline void psi_sched_switch(struct task_struct *prev,
psi_task_switch(prev, next, sleep); psi_task_switch(prev, next, sleep);
} }
static inline void psi_task_tick(struct rq *rq)
{
if (static_branch_likely(&psi_disabled))
return;
if (unlikely(rq->curr->in_memstall))
psi_memstall_tick(rq->curr, cpu_of(rq));
}
#else /* CONFIG_PSI */ #else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {} static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {} static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
...@@ -159,7 +151,6 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {} ...@@ -159,7 +151,6 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev, static inline void psi_sched_switch(struct task_struct *prev,
struct task_struct *next, struct task_struct *next,
bool sleep) {} bool sleep) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */ #endif /* CONFIG_PSI */
#ifdef CONFIG_SCHED_INFO #ifdef CONFIG_SCHED_INFO
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment