Commit 1f41906a authored by Frederic Weisbecker, committed by Ingo Molnar

sched/fair: Correctly handle nohz ticks CPU load accounting

Ticks can happen while the CPU is in dynticks-idle or dynticks-singletask
mode. In fact "nohz" or "dynticks" only mean that we exit the periodic
mode and try to minimize the number of ticks. The nohz subsystem uses
confusing terminology with the internal state "ts->tick_stopped", which
is also exposed through its public interface via tick_nohz_tick_stopped().
This is a misnomer: the tick is reduced on a best-effort basis rather
than stopped. In the best case the tick is indeed stopped, but there is
no guarantee of that. If a timer needs to fire one second later, a tick
will fire while the CPU is in nohz mode, and this is a very common
scenario.

This confusion turns out to be a problem for CPU load updates:
cpu_load_update_active() doesn't handle nohz ticks correctly because it
assumes that ticks are completely stopped in nohz mode and that
cpu_load_update_active() therefore can't be called in dynticks mode. When
a nohz tick does happen, the whole previous tickless load is ignored and
the function just records the load for the current tick, ignoring the
potentially long idle period behind it.

In order to solve this, we could account the current load for the
previous nohz time, but there is a risk that we would account the load
of a task that got freshly enqueued for the whole nohz period.

So instead, let's record the dynticks load on nohz frame entry so we
know what to record in case of nohz ticks, then use this record to
account the tickless load on nohz ticks and at nohz frame end.
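
To visualize the resulting scheme, here is a compact user-space model
(a hypothetical sketch written for this note, not the kernel code; all
names and the simplified decay are made up for illustration):

#include <stdio.h>

#define IDX 5

static long cpu_load[IDX];	/* mock of rq->cpu_load[] */
static long last_update;	/* mock of rq->last_load_update_tick */

/*
 * Stand-in for cpu_load_update(): let each index decay toward the
 * recorded tickless load over 'pending' missed jiffies, then store
 * the fresh sample in cpu_load[0].
 */
static void load_update(long load, long pending, long tickless)
{
	for (int i = 1; i < IDX; i++)
		for (long n = 0; n < pending; n++)
			cpu_load[i] -= (cpu_load[i] - tickless) >> i;
	cpu_load[0] = load;
}

/* Mock of cpu_load_update_nohz_start(): record the load at tick stop. */
static void nohz_start(long load)
{
	cpu_load[0] = load;
}

/*
 * Mock of the nohz tick and cpu_load_update_nohz_stop() paths: account
 * the elapsed tickless jiffies with the load recorded at nohz entry
 * instead of pretending the whole period had the current load.
 */
static void nohz_tick_or_stop(long now, long load)
{
	load_update(load, now - last_update, cpu_load[0]);
	last_update = now;
}

int main(void)
{
	cpu_load[1] = cpu_load[2] = 512;
	nohz_start(512);		/* tick stopped while load was 512 */
	nohz_tick_or_stop(100, 384);	/* a nohz tick fires 100 jiffies later */
	printf("cpu_load[1]=%ld cpu_load[2]=%ld\n", cpu_load[1], cpu_load[2]);
	return 0;
}

The point of the model: the 100 tickless jiffies are accounted at the
load recorded on nohz entry, rather than being collapsed into a single
tick's worth of the current load.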
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1460555812-25375-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cee1afce
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,9 +178,11 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void calc_global_load(unsigned long ticks);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void cpu_load_update_nohz(int active);
+extern void cpu_load_update_nohz_start(void);
+extern void cpu_load_update_nohz_stop(void);
 #else
-static inline void cpu_load_update_nohz(int active) { }
+static inline void cpu_load_update_nohz_start(void) { }
+static inline void cpu_load_update_nohz_stop(void) { }
 #endif
 extern void dump_cpu_task(int cpu);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4563,7 +4563,6 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * @this_rq: The rq to update statistics for
  * @this_load: The current load
  * @pending_updates: The number of missed updates
- * @active: !0 for NOHZ_FULL
  *
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
@@ -4592,12 +4591,12 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  *   load[i]_n = (1 - 1/2^i)^n * load[i]_0
  *
  * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
- * term. See the @active paramter.
+ * term.
  */
-static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
-			      unsigned long pending_updates, int active)
+static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
+			    unsigned long pending_updates)
 {
-	unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
+	unsigned long tickless_load = this_rq->cpu_load[0];
 	int i, scale;
 
 	this_rq->nr_load_updates++;
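
As a quick numerical check of the load[i]_n = (1 - 1/2^i)^n * load[i]_0
decay quoted in the hunk above, here is a stand-alone snippet (written
for this note; the kernel itself uses precomputed lookup tables inside
decay_load_missed() rather than a loop):

#include <stdio.h>

int main(void)
{
	long load = 1024;	/* load[i]_0 */
	int i = 2, n = 3;	/* index i, n missed updates */

	/* One iteration per missed update: load *= (1 - 1/2^i). */
	for (int k = 0; k < n; k++)
		load -= load >> i;

	printf("load[%d]_%d = %ld\n", i, n, load);	/* 1024 * (3/4)^3 = 432 */
	return 0;
}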
@@ -4642,10 +4641,23 @@ static unsigned long weighted_cpuload(const int cpu)
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
-static void __cpu_load_update_nohz(struct rq *this_rq,
-				   unsigned long curr_jiffies,
-				   unsigned long load,
-				   int active)
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we need to avoid the delta approach from the regular tick when
+ * possible since that would seriously skew the load calculation. This is why we
+ * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
+ * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
+ * loop exit, nohz_idle_balance, nohz full exit...)
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+static void cpu_load_update_nohz(struct rq *this_rq,
+				 unsigned long curr_jiffies,
+				 unsigned long load)
 {
 	unsigned long pending_updates;
@@ -4657,23 +4669,10 @@ static void __cpu_load_update_nohz(struct rq *this_rq,
 		 * In the NOHZ_FULL case, we were non-idle, we should consider
 		 * its weighted load.
 		 */
-		__cpu_load_update(this_rq, load, pending_updates, active);
+		cpu_load_update(this_rq, load, pending_updates);
 	}
 }
 
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
 /*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
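
The {0,2} versus {1,1} remark in the comment above can be reproduced
with a toy model (hypothetical, for illustration only): one CPU
increments jiffies every 1000 us while the observer's tick drifts
slightly across those updates, so two equal wall-clock intervals are
observed as deltas 0 and 2.

#include <stdio.h>

/* One jiffy per 1000 us on the CPU doing the jiffies update. */
static long jiffies_at(long t_us)
{
	return t_us / 1000;
}

int main(void)
{
	/* Observer ticks ~1000 us apart, drifting across the updates. */
	long sample_us[] = { 1010, 1995, 3005 };
	long prev = jiffies_at(sample_us[0]);

	for (int i = 1; i < 3; i++) {
		long cur = jiffies_at(sample_us[i]);
		printf("observed delta: %ld\n", cur - prev);	/* 0, then 2 */
		prev = cur;
	}
	return 0;
}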
@@ -4686,26 +4685,56 @@ static void cpu_load_update_idle(struct rq *this_rq)
 	if (weighted_cpuload(cpu_of(this_rq)))
 		return;
 
-	__cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
+	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
 }
 
 /*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ * Record CPU load on nohz entry so we know the tickless load to account
+ * on nohz exit. cpu_load[0] happens then to be updated more frequently
+ * than other cpu_load[idx] but it should be fine as cpu_load readers
+ * shouldn't rely into synchronized cpu_load[*] updates.
  */
-void cpu_load_update_nohz(int active)
+void cpu_load_update_nohz_start(void)
 {
 	struct rq *this_rq = this_rq();
+
+	/*
+	 * This is all lockless but should be fine. If weighted_cpuload changes
+	 * concurrently we'll exit nohz. And cpu_load write can race with
+	 * cpu_load_update_idle() but both updater would be writing the same.
+	 */
+	this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
+}
+
+/*
+ * Account the tickless load in the end of a nohz frame.
+ */
+void cpu_load_update_nohz_stop(void)
+{
 	unsigned long curr_jiffies = READ_ONCE(jiffies);
-	unsigned long load = active ? weighted_cpuload(cpu_of(this_rq)) : 0;
+	struct rq *this_rq = this_rq();
+	unsigned long load;
 
 	if (curr_jiffies == this_rq->last_load_update_tick)
 		return;
 
+	load = weighted_cpuload(cpu_of(this_rq));
 	raw_spin_lock(&this_rq->lock);
-	__cpu_load_update_nohz(this_rq, curr_jiffies, load, active);
+	cpu_load_update_nohz(this_rq, curr_jiffies, load);
 	raw_spin_unlock(&this_rq->lock);
 }
-#endif /* CONFIG_NO_HZ */
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline void cpu_load_update_nohz(struct rq *this_rq,
+					unsigned long curr_jiffies,
+					unsigned long load) { }
+#endif /* CONFIG_NO_HZ_COMMON */
+
+static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
+{
+	/* See the mess around cpu_load_update_nohz(). */
+	this_rq->last_load_update_tick = READ_ONCE(jiffies);
+	cpu_load_update(this_rq, load, 1);
+}
 
 /*
  * Called from scheduler_tick()
@@ -4713,11 +4742,11 @@ void cpu_load_update_nohz(int active)
 void cpu_load_update_active(struct rq *this_rq)
 {
 	unsigned long load = weighted_cpuload(cpu_of(this_rq));
-	/*
-	 * See the mess around cpu_load_update_idle() / cpu_load_update_nohz().
-	 */
-	this_rq->last_load_update_tick = jiffies;
-	__cpu_load_update(this_rq, load, 1, 1);
+
+	if (tick_nohz_tick_stopped())
+		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
+	else
+		cpu_load_update_periodic(this_rq, load);
 }
 
 /*
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -776,6 +776,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	if (!ts->tick_stopped) {
 		nohz_balance_enter_idle(cpu);
 		calc_load_enter_idle();
+		cpu_load_update_nohz_start();
 
 		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
 		ts->tick_stopped = 1;
@@ -802,11 +803,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	return tick;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now, int active)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
 	/* Update jiffies first */
 	tick_do_update_jiffies64(now);
-	cpu_load_update_nohz(active);
+	cpu_load_update_nohz_stop();
 	calc_load_exit_idle();
 	touch_softlockup_watchdog_sched();
@@ -833,7 +834,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
 	if (can_stop_full_tick(ts))
 		tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 	else if (ts->tick_stopped)
-		tick_nohz_restart_sched_tick(ts, ktime_get(), 1);
+		tick_nohz_restart_sched_tick(ts, ktime_get());
 #endif
 }
@@ -1024,7 +1025,7 @@ void tick_nohz_idle_exit(void)
 	tick_nohz_stop_idle(ts, now);
 
 	if (ts->tick_stopped) {
-		tick_nohz_restart_sched_tick(ts, now, 0);
+		tick_nohz_restart_sched_tick(ts, now);
 		tick_nohz_account_idle_ticks(ts);
 	}
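
For orientation, the hook placement that results from the hunks above
(a reconstructed summary, not part of the patch itself):

nohz entry:  tick_nohz_stop_sched_tick()
                 -> cpu_load_update_nohz_start()   /* record cpu_load[0] */

nohz tick:   scheduler_tick() -> cpu_load_update_active()
                 -> cpu_load_update_nohz()         /* tick_nohz_tick_stopped() */
                 -> cpu_load_update_periodic()     /* otherwise */

nohz exit:   tick_nohz_restart_sched_tick()
                 -> cpu_load_update_nohz_stop()    /* account tickless load */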