Commit 9d7fb042 authored by Peter Zijlstra, committed by Ingo Molnar

sched/cputime: Guarantee stime + utime == rtime

While the current code guarantees monotonicity for stime and utime
independently of one another, it does not guarantee that the sum of
both is equal to the total time we started out with.

This confuses tools (and people) that look at this sum, such as top,
which will report >100% usage followed by a matching period of 0% usage.

Rework the code to provide both individual monotonicity and a coherent
sum.
Suggested-by: Fredrik Markstrom <fredrik.markstrom@gmail.com>
Reported-by: Fredrik Markstrom <fredrik.markstrom@gmail.com>
Tested-by: Fredrik Markstrom <fredrik.markstrom@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: jason.low2@hp.com
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 781b0203
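
For readers who just want the shape of the new algorithm before reading the diff, here is a small
self-contained userspace sketch of the adjustment performed below. It is illustrative only and not
part of the commit: the names are hypothetical, plain integers stand in for cputime_t, an integer
multiply/divide approximates scale_stime(), and the prev->lock serialization is omitted.

/*
 * Illustrative sketch of cputime_adjust() as changed by this commit
 * (hypothetical names, no locking, integer math instead of scale_stime()).
 */
#include <stdio.h>

typedef unsigned long long u64;

struct prev_cputime_sketch {
	u64 utime;
	u64 stime;
};

static void cputime_adjust_sketch(u64 utime, u64 stime, u64 rtime,
				  struct prev_cputime_sketch *prev,
				  u64 *ut, u64 *st)
{
	/* Never hand out less than we already reported. */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	if (utime == 0) {
		stime = rtime;
	} else if (stime == 0) {
		utime = rtime;
	} else {
		/* Scale the tick-based stime so that stime + utime == rtime. */
		stime = stime * rtime / (stime + utime);
		if (stime < prev->stime)	/* keep stime monotonic */
			stime = prev->stime;
		utime = rtime - stime;
		if (utime < prev->utime) {	/* keep utime monotonic too */
			utime = prev->utime;
			stime = rtime - utime;
		}
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
}

int main(void)
{
	struct prev_cputime_sketch prev = { 0, 0 };
	u64 ut, st;

	/* Two successive samples: the reported sum always equals rtime. */
	cputime_adjust_sketch(30, 10, 50, &prev, &ut, &st);
	printf("utime=%llu stime=%llu sum=%llu\n", ut, st, ut + st);

	cputime_adjust_sketch(31, 10, 60, &prev, &ut, &st);
	printf("utime=%llu stime=%llu sum=%llu\n", ut, st, ut + st);
	return 0;
}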
@@ -32,6 +32,14 @@ extern struct fs_struct init_fs;
 #define INIT_CPUSET_SEQ(tsk)
 #endif
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
+},
+#else
+#define INIT_PREV_CPUTIME(x)
+#endif
+
 #define INIT_SIGNALS(sig) { \
 	.nr_threads = 1, \
 	.thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -46,6 +54,7 @@ extern struct fs_struct init_fs;
 	.cputime_atomic = INIT_CPUTIME_ATOMIC, \
 	.running = 0, \
 	}, \
+	INIT_PREV_CPUTIME(sig) \
 	.cred_guard_mutex = \
		 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
 }
@@ -246,6 +255,7 @@ extern struct task_group root_task_group;
 	INIT_TASK_RCU_TASKS(tsk) \
 	INIT_CPUSET_SEQ(tsk) \
 	INIT_RT_MUTEXES(tsk) \
+	INIT_PREV_CPUTIME(tsk) \
 	INIT_VTIME(tsk) \
 	INIT_NUMA_BALANCING(tsk) \
 	INIT_KASAN(tsk) \
......
@@ -530,39 +530,49 @@ struct cpu_itimer {
 };
 
 /**
- * struct cputime - snapshot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime
  * @utime: time spent in user mode
  * @stime: time spent in system mode
+ * @lock: protects the above two fields
  *
- * Gathers a generic snapshot of user and system time.
+ * Stores previous user/system time values such that we can guarantee
+ * monotonicity.
  */
-struct cputime {
+struct prev_cputime {
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	cputime_t utime;
 	cputime_t stime;
+	raw_spinlock_t lock;
+#endif
 };
 
+static inline void prev_cputime_init(struct prev_cputime *prev)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	prev->utime = prev->stime = 0;
+	raw_spin_lock_init(&prev->lock);
+#endif
+}
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime: time spent in user mode, in &cputime_t units
  * @stime: time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
  *
- * This is an extension of struct cputime that includes the total runtime
- * spent by the task from the scheduler point of view.
- *
- * As a result, this structure groups together three kinds of CPU time
- * that are tracked for threads and thread groups. Most things considering
- * CPU time want to group these counts together and treat all three
- * of them in parallel.
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups. Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
 };
+
 /* Alternate field names when used to cache expirations. */
-#define prof_exp	stime
 #define virt_exp	utime
+#define prof_exp	stime
 #define sched_exp	sum_exec_runtime
 
 #define INIT_CPUTIME \
@@ -715,9 +725,7 @@ struct signal_struct {
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	struct cputime prev_cputime;
-#endif
+	struct prev_cputime prev_cputime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
@@ -1481,9 +1489,7 @@ struct task_struct {
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	struct cputime prev_cputime;
-#endif
+	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_t vtime_seqlock;
 	unsigned long long vtime_snap;
......
@@ -1067,6 +1067,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 	rcu_assign_pointer(tsk->sighand, sig);
 	if (!sig)
 		return -ENOMEM;
+
 	atomic_set(&sig->count, 1);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 	return 0;
@@ -1128,6 +1129,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);
 	seqlock_init(&sig->stats_lock);
+	prev_cputime_init(&sig->prev_cputime);
 
 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sig->real_timer.function = it_real_fn;
@@ -1335,9 +1337,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	p->prev_cputime.utime = p->prev_cputime.stime = 0;
-#endif
+	prev_cputime_init(&p->prev_cputime);
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_init(&p->vtime_seqlock);
 	p->vtime_snap = 0;
......
@@ -555,48 +555,43 @@ static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
 }
 
 /*
- * Atomically advance counter to the new value. Interrupts, vcpu
- * scheduling, and scaling inaccuracies can cause cputime_advance
- * to be occasionally called with a new value smaller than counter.
- * Let's enforce atomicity.
+ * Adjust tick based cputime random precision against scheduler runtime
+ * accounting.
  *
- * Normally a caller will only go through this loop once, or not
- * at all in case a previous caller updated counter the same jiffy.
- */
-static void cputime_advance(cputime_t *counter, cputime_t new)
-{
-	cputime_t old;
-
-	while (new > (old = READ_ONCE(*counter)))
-		cmpxchg_cputime(counter, old, new);
-}
-
-/*
- * Adjust tick based cputime random precision against scheduler
- * runtime accounting.
+ * Tick based cputime accounting depend on random scheduling timeslices of a
+ * task to be interrupted or not by the timer. Depending on these
+ * circumstances, the number of these interrupts may be over or
+ * under-optimistic, matching the real user and system cputime with a variable
+ * precision.
+ *
+ * Fix this by scaling these tick based values against the total runtime
+ * accounted by the CFS scheduler.
+ *
+ * This code provides the following guarantees:
+ *
+ *   stime + utime == rtime
+ *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
+ *
+ * Assuming that rtime_i+1 >= rtime_i.
  */
 static void cputime_adjust(struct task_cputime *curr,
-			   struct cputime *prev,
+			   struct prev_cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
 	cputime_t rtime, stime, utime;
+	unsigned long flags;
 
-	/*
-	 * Tick based cputime accounting depend on random scheduling
-	 * timeslices of a task to be interrupted or not by the timer.
-	 * Depending on these circumstances, the number of these interrupts
-	 * may be over or under-optimistic, matching the real user and system
-	 * cputime with a variable precision.
-	 *
-	 * Fix this by scaling these tick based values against the total
-	 * runtime accounted by the CFS scheduler.
-	 */
+	/* Serialize concurrent callers such that we can honour our guarantees */
+	raw_spin_lock_irqsave(&prev->lock, flags);
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
 	/*
-	 * Update userspace visible utime/stime values only if actual execution
-	 * time is bigger than already exported. Note that can happen, that we
-	 * provided bigger values due to scaling inaccuracy on big numbers.
+	 * This is possible under two circumstances:
+	 *  - rtime isn't monotonic after all (a bug);
+	 *  - we got reordered by the lock.
+	 *
+	 * In both cases this acts as a filter such that the rest of the code
+	 * can assume it is monotonic regardless of anything else.
 	 */
 	if (prev->stime + prev->utime >= rtime)
 		goto out;
@@ -606,22 +601,46 @@ static void cputime_adjust(struct task_cputime *curr,
 	if (utime == 0) {
 		stime = rtime;
-	} else if (stime == 0) {
-		utime = rtime;
-	} else {
-		cputime_t total = stime + utime;
-
-		stime = scale_stime((__force u64)stime,
-				    (__force u64)rtime, (__force u64)total);
-		utime = rtime - stime;
+		goto update;
 	}
 
-	cputime_advance(&prev->stime, stime);
-	cputime_advance(&prev->utime, utime);
+	if (stime == 0) {
+		utime = rtime;
+		goto update;
+	}
+
+	stime = scale_stime((__force u64)stime, (__force u64)rtime,
+			    (__force u64)(stime + utime));
+
+	/*
+	 * Make sure stime doesn't go backwards; this preserves monotonicity
+	 * for utime because rtime is monotonic.
+	 *
+	 *  utime_i+1 = rtime_i+1 - stime_i
+	 *            = rtime_i+1 - (rtime_i - utime_i)
+	 *            = (rtime_i+1 - rtime_i) + utime_i
+	 *            >= utime_i
+	 */
+	if (stime < prev->stime)
+		stime = prev->stime;
+	utime = rtime - stime;
+
+	/*
+	 * Make sure utime doesn't go backwards; this still preserves
+	 * monotonicity for stime, analogous argument to above.
+	 */
+	if (utime < prev->utime) {
+		utime = prev->utime;
+		stime = rtime - utime;
+	}
 
+update:
+	prev->stime = stime;
+	prev->utime = utime;
 out:
 	*ut = prev->utime;
 	*st = prev->stime;
+	raw_spin_unlock_irqrestore(&prev->lock, flags);
 }
 
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
......
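
As a concrete, purely illustrative instance of the monotonicity argument in the comment above:
suppose the previous sample reported stime_i = 12 and utime_i = 38 with rtime_i = 50, and the next
sample has rtime_i+1 = 60 but scaling yields stime = 11 < stime_i. The clamp keeps stime at 12, so
utime_i+1 = 60 - 12 = (60 - 50) + 38 = 48 >= utime_i, and the reported sum is still exactly rtime_i+1.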