Commit 3840cbe2 authored by Johannes Weiner, committed by Linus Torvalds

sched: psi: fix bogus pressure spikes from aggregation race

Brandon reports sporadic, nonsensical spikes in cumulative pressure
time (total=) when reading cpu.pressure at a high rate. This is due to
a race condition between reader aggregation and tasks changing states.

While it affects all states and all resources captured by PSI, in
practice it most likely triggers with CPU pressure, since scheduling
events are so frequent compared to other resource events.

The race context is the live snooping of ongoing stalls during a
pressure read. The read aggregates per-cpu records for stalls that
have concluded, but will also incorporate, ad hoc, the duration of any
active state that hasn't been recorded yet. This is important to get
timely measurements of ongoing stalls. Those ad-hoc samples are
calculated on-the-fly up to the current time on that CPU; since the
stall hasn't concluded, it's expected that this is the minimum amount
of stall time that will enter the per-cpu records once it does.
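
For reference, the reader side looks roughly like this (a simplified
sketch of get_recent_times() in kernel/sched/psi.c; locals and
bookkeeping trimmed):

	/* Snoop a coherent snapshot of one CPU's state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);		/* reader-side clock read */
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask  = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;

		/* ad-hoc sample: extend a still-active stall up to "now" */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		/* delta against what this aggregator saw last time */
		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];
	}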

The problem is that the path that concludes the state uses a CPU clock
read that is not synchronized against aggregators; the clock is read
outside of the seqlock protection. This allows aggregators to race and
snoop a stall with a longer duration than will actually be recorded.
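
Schematically, the pre-fix interleaving looks like this (W is a task
state change concluding a stall, A is an aggregating reader; clock
values are illustrative):

	W: now = cpu_clock(cpu);		/* t = 100, outside the seqlock */
	   ... W is preempted or otherwise delayed ...
	A: seq = read_seqcount_begin(&groupc->seq);
	A: now = cpu_clock(cpu);		/* t = 105 */
	A: times[s] += 105 - state_start;	/* snoops the stall up to t = 105 */
	A: read_seqcount_retry() == 0;		/* no retry: W hasn't written yet */
	W: write_seqcount_begin(&groupc->seq);
	W: record_times(groupc, now);		/* records the stall only up to t = 100 */
	W: write_seqcount_end(&groupc->seq);

The aggregator's snapshot now contains more stall time than the
per-cpu record will ever hold for that interval.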

With the recorded stall time being less than the last snapshot
remembered by the aggregator, a subsequent sample will underflow and
observe a bogus delta value, resulting in an erratic jump in pressure.
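
In terms of the u32 sample buckets, a minimal illustration (values
made up):

	u32 prev  = 1005;	/* aggregator snapshot, inflated by the racy snoop */
	u32 curr  = 1000;	/* per-cpu record after the state concluded */
	u32 delta = curr - prev;	/* unsigned wrap: 4294967291, not -5 */

That wrapped delta is what appears as the sudden jump in the total=
output.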

Fix this by moving the clock read of the state change into the seqlock
protection. This ensures no aggregation can snoop live stalls past the
time that's recorded when the state concludes.
Reported-by: Brandon Duffany <brandon@buildbuddy.io>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=219194
Link: https://lore.kernel.org/lkml/20240827121851.GB438928@cmpxchg.org/
Fixes: df774306 ("psi: Reduce calls to sched_clock() in psi")
Cc: stable@vger.kernel.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8c245fe7
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -769,12 +769,13 @@ static void record_times(struct psi_group_cpu *groupc, u64 now)
 }
 
 static void psi_group_change(struct psi_group *group, int cpu,
-                             unsigned int clear, unsigned int set, u64 now,
+                             unsigned int clear, unsigned int set,
                              bool wake_clock)
 {
         struct psi_group_cpu *groupc;
         unsigned int t, m;
         u32 state_mask;
+        u64 now;
 
         lockdep_assert_rq_held(cpu_rq(cpu));
         groupc = per_cpu_ptr(group->pcpu, cpu);
@@ -789,6 +790,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
          * SOME and FULL time these may have resulted in.
          */
         write_seqcount_begin(&groupc->seq);
+        now = cpu_clock(cpu);
 
         /*
          * Start with TSK_ONCPU, which doesn't have a corresponding
@@ -899,18 +901,15 @@ void psi_task_change(struct task_struct *task, int clear, int set)
 {
         int cpu = task_cpu(task);
         struct psi_group *group;
-        u64 now;
 
         if (!task->pid)
                 return;
 
         psi_flags_change(task, clear, set);
 
-        now = cpu_clock(cpu);
-
         group = task_psi_group(task);
         do {
-                psi_group_change(group, cpu, clear, set, now, true);
+                psi_group_change(group, cpu, clear, set, true);
         } while ((group = group->parent));
 }
 
@@ -919,7 +918,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 {
         struct psi_group *group, *common = NULL;
         int cpu = task_cpu(prev);
-        u64 now = cpu_clock(cpu);
 
         if (next->pid) {
                 psi_flags_change(next, 0, TSK_ONCPU);
@@ -936,7 +934,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                                 break;
                         }
 
-                        psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
+                        psi_group_change(group, cpu, 0, TSK_ONCPU, true);
                 } while ((group = group->parent));
         }
 
@@ -974,7 +972,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                 do {
                         if (group == common)
                                 break;
-                        psi_group_change(group, cpu, clear, set, now, wake_clock);
+                        psi_group_change(group, cpu, clear, set, wake_clock);
                 } while ((group = group->parent));
 
                 /*
@@ -986,7 +984,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
                         clear &= ~TSK_ONCPU;
                         for (; group; group = group->parent)
-                                psi_group_change(group, cpu, clear, set, now, wake_clock);
+                                psi_group_change(group, cpu, clear, set, wake_clock);
                 }
         }
 }
@@ -997,8 +995,8 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
         int cpu = task_cpu(curr);
         struct psi_group *group;
         struct psi_group_cpu *groupc;
-        u64 now, irq;
         s64 delta;
+        u64 irq;
 
         if (static_branch_likely(&psi_disabled))
                 return;
@@ -1011,7 +1009,6 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
         if (prev && task_psi_group(prev) == group)
                 return;
 
-        now = cpu_clock(cpu);
         irq = irq_time_read(cpu);
         delta = (s64)(irq - rq->psi_irq_time);
         if (delta < 0)
@@ -1019,12 +1016,15 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
         rq->psi_irq_time = irq;
 
         do {
+                u64 now;
+
                 if (!group->enabled)
                         continue;
 
                 groupc = per_cpu_ptr(group->pcpu, cpu);
 
                 write_seqcount_begin(&groupc->seq);
+                now = cpu_clock(cpu);
 
                 record_times(groupc, now);
                 groupc->times[PSI_IRQ_FULL] += delta;
@@ -1223,11 +1223,9 @@ void psi_cgroup_restart(struct psi_group *group)
         for_each_possible_cpu(cpu) {
                 struct rq *rq = cpu_rq(cpu);
                 struct rq_flags rf;
-                u64 now;
 
                 rq_lock_irq(rq, &rf);
-                now = cpu_clock(cpu);
-                psi_group_change(group, cpu, 0, 0, now, true);
+                psi_group_change(group, cpu, 0, 0, true);
                 rq_unlock_irq(rq, &rf);
         }
 }