Commit 0a71336b authored by Martin Schwidefsky's avatar Martin Schwidefsky Committed by Linus Torvalds

[PATCH] cputime: introduce cputime

This patch introduces the concept of (virtual) cputime.  Each architecture
can define its method to measure cputime.  The main idea is to define a
cputime_t type and a set of operations on it (see asm-generic/cputime.h).
Then use the type for utime, stime, cutime, cstime, it_virt_value,
it_virt_incr, it_prof_value and it_prof_incr and use the cputime operations
for each access to these variables.  The default implementation is jiffies
based and the effect of this patch for architectures which use the default
implementation should be negligible.

There is a second type cputime64_t which is necessary for the kernel_stat
cpu statistics.  The default cputime_t is 32 bit and based on HZ, this will
overflow after 49.7 days.  This is not enough for kernel_stat (imho not
enough for processes either), so it is necessary to have a 64 bit type.

The third thing that gets introduced by this patch is an additional field
for the /proc/stat interface: cpu steal time.  An architecture can account
cpu steal time by calls to the account_steal_time function.  The cpu which
backs a virtual processor doesn't spend all of its time for the virtual
cpu.  To get meaningful cpu usage numbers this involuntary wait time needs
to be accounted and exported to user space.

From: Hugh Dickins <hugh@veritas.com>

The p->signal check in account_system_time is insufficient.  If the timer
interrupt hits near the end of exit_notify, after EXIT_ZOMBIE has been set,
another cpu may release_task (NULLifying p->signal) in between
account_system_time's check and check_rlimit's dereference.  Nor should
account_it_prof risk send_sig.  But surely account_user_time is safe?
Signed-off-by: default avatarHugh Dickins <hugh@veritas.com>
Signed-off-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 0fada656
......@@ -92,10 +92,12 @@ struct elf_prpsinfo32
current->thread.map_base = DEFAULT_MAP_BASE32; \
current->thread.task_size = DEFAULT_TASK_SIZE32 \
#define jiffies_to_timeval jiffies_to_compat_timeval
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
......
......@@ -60,10 +60,12 @@ struct elf_prpsinfo32
#include <linux/time.h>
#define jiffies_to_timeval jiffies_to_compat_timeval
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
......
......@@ -197,10 +197,12 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#define jiffies_to_timeval jiffies_to_compat_timeval
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
......
......@@ -132,10 +132,12 @@ struct elf_prpsinfo32
#include <linux/time.h>
#define jiffies_to_timeval jiffies_to_compat_timeval
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
......
......@@ -1215,16 +1215,16 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
* this and each other thread to finish dying after the
* core dump synchronization phase.
*/
jiffies_to_timeval(p->utime + p->signal->utime,
cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
&prstatus->pr_utime);
jiffies_to_timeval(p->stime + p->signal->stime,
cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
&prstatus->pr_stime);
} else {
jiffies_to_timeval(p->utime, &prstatus->pr_utime);
jiffies_to_timeval(p->stime, &prstatus->pr_stime);
cputime_to_timeval(p->utime, &prstatus->pr_utime);
cputime_to_timeval(p->stime, &prstatus->pr_stime);
}
jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
......
......@@ -313,8 +313,9 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
int num_threads = 0;
struct mm_struct *mm;
unsigned long long start_time;
unsigned long cmin_flt = 0, cmaj_flt = 0, cutime = 0, cstime = 0;
unsigned long min_flt = 0, maj_flt = 0, utime = 0, stime = 0;
unsigned long cmin_flt = 0, cmaj_flt = 0;
unsigned long min_flt = 0, maj_flt = 0;
cputime_t cutime, cstime, utime, stime;
unsigned long rsslim = 0;
struct task_struct *t;
char tcomm[sizeof(task->comm)];
......@@ -332,6 +333,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
sigemptyset(&sigign);
sigemptyset(&sigcatch);
cutime = cstime = utime = stime = cputime_zero;
read_lock(&tasklist_lock);
if (task->sighand) {
spin_lock_irq(&task->sighand->siglock);
......@@ -344,8 +346,8 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
do {
min_flt += t->min_flt;
maj_flt += t->maj_flt;
utime += t->utime;
stime += t->stime;
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != task);
}
......@@ -367,8 +369,8 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
if (whole) {
min_flt += task->signal->min_flt;
maj_flt += task->signal->maj_flt;
utime += task->signal->utime;
stime += task->signal->stime;
utime = cputime_add(utime, task->signal->utime);
stime = cputime_add(stime, task->signal->stime);
}
}
ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
......@@ -411,10 +413,10 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
cmin_flt,
maj_flt,
cmaj_flt,
jiffies_to_clock_t(utime),
jiffies_to_clock_t(stime),
jiffies_to_clock_t(cutime),
jiffies_to_clock_t(cstime),
cputime_to_clock_t(utime),
cputime_to_clock_t(stime),
cputime_to_clock_t(cutime),
cputime_to_clock_t(cstime),
priority,
nice,
num_threads,
......
......@@ -101,10 +101,10 @@ static int uptime_read_proc(char *page, char **start, off_t off,
struct timespec uptime;
struct timespec idle;
int len;
u64 idle_jiffies = init_task.utime + init_task.stime;
cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
do_posix_clock_monotonic_gettime(&uptime);
jiffies_to_timespec(idle_jiffies, &idle);
cputime_to_timespec(idletime, &idle);
len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
(unsigned long) uptime.tv_sec,
(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
......@@ -322,9 +322,11 @@ static int show_stat(struct seq_file *p, void *v)
{
int i;
unsigned long jif;
u64 sum = 0, user = 0, nice = 0, system = 0,
idle = 0, iowait = 0, irq = 0, softirq = 0;
cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
u64 sum = 0;
user = nice = system = idle = iowait =
irq = softirq = steal = cputime64_zero;
jif = - wall_to_monotonic.tv_sec;
if (wall_to_monotonic.tv_nsec)
--jif;
......@@ -332,25 +334,27 @@ static int show_stat(struct seq_file *p, void *v)
for_each_cpu(i) {
int j;
user += kstat_cpu(i).cpustat.user;
nice += kstat_cpu(i).cpustat.nice;
system += kstat_cpu(i).cpustat.system;
idle += kstat_cpu(i).cpustat.idle;
iowait += kstat_cpu(i).cpustat.iowait;
irq += kstat_cpu(i).cpustat.irq;
softirq += kstat_cpu(i).cpustat.softirq;
user = cputime64_add(user, kstat_cpu(i).cpustat.user);
nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
system = cputime64_add(system, kstat_cpu(i).cpustat.system);
idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
for (j = 0 ; j < NR_IRQS ; j++)
sum += kstat_cpu(i).irqs[j];
}
seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu\n",
(unsigned long long)jiffies_64_to_clock_t(user),
(unsigned long long)jiffies_64_to_clock_t(nice),
(unsigned long long)jiffies_64_to_clock_t(system),
(unsigned long long)jiffies_64_to_clock_t(idle),
(unsigned long long)jiffies_64_to_clock_t(iowait),
(unsigned long long)jiffies_64_to_clock_t(irq),
(unsigned long long)jiffies_64_to_clock_t(softirq));
seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
(unsigned long long)cputime64_to_clock_t(user),
(unsigned long long)cputime64_to_clock_t(nice),
(unsigned long long)cputime64_to_clock_t(system),
(unsigned long long)cputime64_to_clock_t(idle),
(unsigned long long)cputime64_to_clock_t(iowait),
(unsigned long long)cputime64_to_clock_t(irq),
(unsigned long long)cputime64_to_clock_t(softirq),
(unsigned long long)cputime64_to_clock_t(steal));
for_each_online_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
......@@ -361,15 +365,17 @@ static int show_stat(struct seq_file *p, void *v)
iowait = kstat_cpu(i).cpustat.iowait;
irq = kstat_cpu(i).cpustat.irq;
softirq = kstat_cpu(i).cpustat.softirq;
seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu\n",
steal = kstat_cpu(i).cpustat.steal;
seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
i,
(unsigned long long)jiffies_64_to_clock_t(user),
(unsigned long long)jiffies_64_to_clock_t(nice),
(unsigned long long)jiffies_64_to_clock_t(system),
(unsigned long long)jiffies_64_to_clock_t(idle),
(unsigned long long)jiffies_64_to_clock_t(iowait),
(unsigned long long)jiffies_64_to_clock_t(irq),
(unsigned long long)jiffies_64_to_clock_t(softirq));
(unsigned long long)cputime64_to_clock_t(user),
(unsigned long long)cputime64_to_clock_t(nice),
(unsigned long long)cputime64_to_clock_t(system),
(unsigned long long)cputime64_to_clock_t(idle),
(unsigned long long)cputime64_to_clock_t(iowait),
(unsigned long long)cputime64_to_clock_t(irq),
(unsigned long long)cputime64_to_clock_t(softirq),
(unsigned long long)cputime64_to_clock_t(steal));
}
seq_printf(p, "intr %llu", (unsigned long long)sum);
......
#ifndef __ALPHA_CPUTIME_H
#define __ALPHA_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __ALPHA_CPUTIME_H */
#ifndef __ARM_CPUTIME_H
#define __ARM_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __ARM_CPUTIME_H */
#ifndef __ARM26_CPUTIME_H
#define __ARM26_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __ARM26_CPUTIME_H */
#ifndef __CRIS_CPUTIME_H
#define __CRIS_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __CRIS_CPUTIME_H */
#ifndef _ASM_GENERIC_CPUTIME_H
#define _ASM_GENERIC_CPUTIME_H

#include <linux/time.h>
#include <linux/jiffies.h>

/*
 * Default (jiffies based) implementation of the cputime_t abstraction.
 * cputime_t is an opaque per-task cpu time value; architectures that
 * measure cpu time differently provide their own asm/cputime.h with the
 * same set of operations.  Here a cputime_t simply IS a jiffies count.
 */
typedef unsigned long cputime_t;

#define cputime_zero			(0UL)
#define cputime_max			((~0UL >> 1) - 1)
#define cputime_add(__a, __b)		((__a) + (__b))
#define cputime_sub(__a, __b)		((__a) - (__b))
#define cputime_eq(__a, __b)		((__a) == (__b))
#define cputime_gt(__a, __b)		((__a) > (__b))
#define cputime_ge(__a, __b)		((__a) >= (__b))
#define cputime_lt(__a, __b)		((__a) < (__b))
#define cputime_le(__a, __b)		((__a) <= (__b))
#define cputime_to_jiffies(__ct)	(__ct)
#define jiffies_to_cputime(__hz)	(__hz)

/*
 * cputime64_t is the 64 bit variant used for the per-cpu kernel_stat
 * counters, which would overflow a 32 bit jiffies count in ~49.7 days
 * at HZ=1000.
 */
typedef u64 cputime64_t;

#define cputime64_zero			(0ULL)
#define cputime64_add(__a, __b)		((__a) + (__b))
#define cputime64_to_jiffies64(__ct)	(__ct)
#define cputime_to_cputime64(__ct)	((u64) __ct)

/*
 * Convert cputime to milliseconds and back.
 */
#define cputime_to_msecs(__ct)		jiffies_to_msecs(__ct)
#define msecs_to_cputime(__msecs)	msecs_to_jiffies(__msecs)

/*
 * Convert cputime to seconds and back.
 *
 * jiffies_to_msecs()/msecs_to_jiffies() already scale by HZ, so the
 * remaining factor between milliseconds and seconds is 1000 — NOT HZ.
 * (Dividing/multiplying by HZ here was only correct for HZ == 1000.)
 */
#define cputime_to_secs(__ct)		(jiffies_to_msecs(__ct) / 1000)
#define secs_to_cputime(__secs)		(msecs_to_jiffies((__secs) * 1000))

/*
 * Convert cputime to timespec and back.
 */
#define timespec_to_cputime(__val)	timespec_to_jiffies(__val)
#define cputime_to_timespec(__ct,__val)	jiffies_to_timespec(__ct,__val)

/*
 * Convert cputime to timeval and back.
 */
#define timeval_to_cputime(__val)	timeval_to_jiffies(__val)
#define cputime_to_timeval(__ct,__val)	jiffies_to_timeval(__ct,__val)

/*
 * Convert cputime to clock (USER_HZ units, as exported to user space)
 * and back.
 */
#define cputime_to_clock_t(__ct)	jiffies_to_clock_t(__ct)
#define clock_t_to_cputime(__x)		clock_t_to_jiffies(__x)

/*
 * Convert cputime64 to clock.
 */
#define cputime64_to_clock_t(__ct)	jiffies_64_to_clock_t(__ct)

#endif
#ifndef __H8300_CPUTIME_H
#define __H8300_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __H8300_CPUTIME_H */
#ifndef __I386_CPUTIME_H
#define __I386_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __I386_CPUTIME_H */
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __IA64_CPUTIME_H */
#ifndef __M32R_CPUTIME_H
#define __M32R_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __M32R_CPUTIME_H */
#ifndef __M68K_CPUTIME_H
#define __M68K_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __M68K_CPUTIME_H */
#ifndef __M68KNOMMU_CPUTIME_H
#define __M68KNOMMU_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __M68KNOMMU_CPUTIME_H */
#ifndef __MIPS_CPUTIME_H
#define __MIPS_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __MIPS_CPUTIME_H */
#ifndef __PARISC_CPUTIME_H
#define __PARISC_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __PARISC_CPUTIME_H */
#ifndef __PPC_CPUTIME_H
#define __PPC_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __PPC_CPUTIME_H */
#ifndef __PPC_CPUTIME_H
#define __PPC_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __PPC_CPUTIME_H */
#ifndef __S390_CPUTIME_H
#define __S390_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __S390_CPUTIME_H */
#ifndef __SH_CPUTIME_H
#define __SH_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SH_CPUTIME_H */
#ifndef __SH64_CPUTIME_H
#define __SH64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SH64_CPUTIME_H */
#ifndef __SPARC_CPUTIME_H
#define __SPARC_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SPARC_CPUTIME_H */
#ifndef __SPARC64_CPUTIME_H
#define __SPARC64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SPARC64_CPUTIME_H */
#ifndef __UM_CPUTIME_H
#define __UM_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __UM_CPUTIME_H */
#ifndef __V850_CPUTIME_H
#define __V850_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __V850_CPUTIME_H */
#ifndef __X86_64_CPUTIME_H
#define __X86_64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __X86_64_CPUTIME_H */
......@@ -6,6 +6,7 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <asm/cputime.h>
/*
* 'kernel_stat.h' contains the definitions needed for doing
......@@ -14,13 +15,14 @@
*/
struct cpu_usage_stat {
u64 user;
u64 nice;
u64 system;
u64 softirq;
u64 irq;
u64 idle;
u64 iowait;
cputime64_t user;
cputime64_t nice;
cputime64_t system;
cputime64_t softirq;
cputime64_t irq;
cputime64_t idle;
cputime64_t iowait;
cputime64_t steal;
};
struct kernel_stat {
......@@ -50,4 +52,8 @@ static inline int kstat_irqs(int irq)
return sum;
}
extern void account_user_time(struct task_struct *, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t);
extern void account_steal_time(struct task_struct *, cputime_t);
#endif /* _LINUX_KERNEL_STAT_H */
......@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/cputime.h>
#include <linux/smp.h>
#include <linux/sem.h>
......@@ -168,7 +169,7 @@ long io_schedule_timeout(long timeout);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(int user_tick, int system);
extern void scheduler_tick(void);
extern unsigned long cache_decay_ticks;
/* Attach to any functions which should be ignored in wchan output. */
......@@ -311,7 +312,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
unsigned long utime, stime, cutime, cstime;
cputime_t utime, stime, cutime, cstime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
......@@ -589,10 +590,11 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
unsigned long rt_priority;
unsigned long it_real_value, it_prof_value, it_virt_value;
unsigned long it_real_incr, it_prof_incr, it_virt_incr;
unsigned long it_real_value, it_real_incr;
cputime_t it_virt_value, it_virt_incr;
cputime_t it_prof_value, it_prof_incr;
struct timer_list real_timer;
unsigned long utime, stime;
cputime_t utime, stime;
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
......
......@@ -163,15 +163,15 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
struct compat_tms tmp;
struct task_struct *tsk = current;
struct task_struct *t;
unsigned long utime, stime, cutime, cstime;
cputime_t utime, stime, cutime, cstime;
read_lock(&tasklist_lock);
utime = tsk->signal->utime;
stime = tsk->signal->stime;
t = tsk;
do {
utime += t->utime;
stime += t->stime;
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != tsk);
......@@ -190,10 +190,10 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
tmp.tms_utime = compat_jiffies_to_clock_t(utime);
tmp.tms_stime = compat_jiffies_to_clock_t(stime);
tmp.tms_cutime = compat_jiffies_to_clock_t(cutime);
tmp.tms_cstime = compat_jiffies_to_clock_t(cstime);
tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
return -EFAULT;
}
......
......@@ -48,7 +48,9 @@ static inline void check_for_tasks(int cpu)
write_lock_irq(&tasklist_lock);
for_each_process(p) {
if (task_cpu(p) == cpu && (p->utime != 0 || p->stime != 0))
if (task_cpu(p) == cpu &&
(!cputime_eq(p->utime, cputime_zero) ||
!cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
(state = %ld, flags = %lx) \n",
p->comm, p->pid, cpu, p->state, p->flags);
......
......@@ -755,8 +755,8 @@ static void exit_notify(struct task_struct *tsk)
* Clear these here so that update_process_times() won't try to deliver
* itimer, profile or rlimit signals to this task while it is in late exit.
*/
tsk->it_virt_value = 0;
tsk->it_prof_value = 0;
tsk->it_virt_value = cputime_zero;
tsk->it_prof_value = cputime_zero;
write_unlock_irq(&tasklist_lock);
......@@ -1046,10 +1046,16 @@ static int wait_task_zombie(task_t *p, int noreap,
* here reaping other children at the same time.
*/
spin_lock_irq(&p->parent->sighand->siglock);
p->parent->signal->cutime +=
p->utime + p->signal->utime + p->signal->cutime;
p->parent->signal->cstime +=
p->stime + p->signal->stime + p->signal->cstime;
p->parent->signal->cutime =
cputime_add(p->parent->signal->cutime,
cputime_add(p->utime,
cputime_add(p->signal->utime,
p->signal->cutime)));
p->parent->signal->cstime =
cputime_add(p->parent->signal->cstime,
cputime_add(p->stime,
cputime_add(p->signal->stime,
p->signal->cstime)));
p->parent->signal->cmin_flt +=
p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
p->parent->signal->cmaj_flt +=
......
......@@ -749,7 +749,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = 0;
sig->utime = sig->stime = sig->cutime = sig->cstime = 0;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
......@@ -871,15 +871,15 @@ static task_t *copy_process(unsigned long clone_flags,
p->it_real_value = 0;
p->it_real_incr = 0;
p->it_virt_value = 0;
p->it_virt_incr = 0;
p->it_prof_value = 0;
p->it_prof_incr = 0;
p->it_virt_value = cputime_zero;
p->it_virt_incr = cputime_zero;
p->it_prof_value = cputime_zero;
p->it_prof_incr = cputime_zero;
init_timer(&p->real_timer);
p->real_timer.data = (unsigned long) p;
p->utime = 0;
p->stime = 0;
p->utime = cputime_zero;
p->stime = cputime_zero;
p->rchar = 0; /* I/O counter: bytes read */
p->wchar = 0; /* I/O counter: bytes written */
p->syscr = 0; /* I/O counter: read syscalls */
......
......@@ -16,11 +16,10 @@
int do_getitimer(int which, struct itimerval *value)
{
register unsigned long val, interval;
register unsigned long val;
switch (which) {
case ITIMER_REAL:
interval = current->it_real_incr;
val = 0;
/*
* FIXME! This needs to be atomic, in case the kernel timer happens!
......@@ -32,20 +31,20 @@ int do_getitimer(int which, struct itimerval *value)
if ((long) val <= 0)
val = 1;
}
jiffies_to_timeval(val, &value->it_value);
jiffies_to_timeval(current->it_real_incr, &value->it_interval);
break;
case ITIMER_VIRTUAL:
val = current->it_virt_value;
interval = current->it_virt_incr;
cputime_to_timeval(current->it_virt_value, &value->it_value);
cputime_to_timeval(current->it_virt_incr, &value->it_interval);
break;
case ITIMER_PROF:
val = current->it_prof_value;
interval = current->it_prof_incr;
cputime_to_timeval(current->it_prof_value, &value->it_value);
cputime_to_timeval(current->it_prof_incr, &value->it_interval);
break;
default:
return(-EINVAL);
}
jiffies_to_timeval(val, &value->it_value);
jiffies_to_timeval(interval, &value->it_interval);
return 0;
}
......@@ -81,37 +80,43 @@ void it_real_fn(unsigned long __data)
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
register unsigned long i, j;
unsigned long expire;
cputime_t cputime;
int k;
i = timeval_to_jiffies(&value->it_interval);
j = timeval_to_jiffies(&value->it_value);
if (ovalue && (k = do_getitimer(which, ovalue)) < 0)
return k;
switch (which) {
case ITIMER_REAL:
del_timer_sync(&current->real_timer);
current->it_real_value = j;
current->it_real_incr = i;
if (!j)
expire = timeval_to_jiffies(&value->it_value);
current->it_real_value = expire;
current->it_real_incr =
timeval_to_jiffies(&value->it_interval);
if (!expire)
break;
if (j > (unsigned long) LONG_MAX)
j = LONG_MAX;
i = j + jiffies;
current->real_timer.expires = i;
if (expire > (unsigned long) LONG_MAX)
expire = LONG_MAX;
current->real_timer.expires = jiffies + expire;
add_timer(&current->real_timer);
break;
case ITIMER_VIRTUAL:
if (j)
j++;
current->it_virt_value = j;
current->it_virt_incr = i;
cputime = timeval_to_cputime(&value->it_value);
if (cputime_gt(cputime, cputime_zero))
cputime = cputime_add(cputime,
jiffies_to_cputime(1));
current->it_virt_value = cputime;
cputime = timeval_to_cputime(&value->it_interval);
current->it_virt_incr = cputime;
break;
case ITIMER_PROF:
if (j)
j++;
current->it_prof_value = j;
current->it_prof_incr = i;
cputime = timeval_to_cputime(&value->it_value);
if (cputime_gt(cputime, cputime_zero))
cputime = cputime_add(cputime,
jiffies_to_cputime(1));
current->it_prof_value = cputime;
cputime = timeval_to_cputime(&value->it_interval);
current->it_prof_incr = cputime;
break;
default:
return -EINVAL;
......
......@@ -1182,7 +1182,7 @@ void fastcall sched_fork(task_t *p)
*/
current->time_slice = 1;
preempt_disable();
scheduler_tick(0, 0);
scheduler_tick();
local_irq_enable();
preempt_enable();
} else
......@@ -2250,6 +2250,148 @@ EXPORT_PER_CPU_SYMBOL(kstat);
STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
((rq)->curr->static_prio > (rq)->best_expired_prio))
/*
 * account_it_virt - charge user cpu time against the ITIMER_VIRTUAL timer.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 *
 * Does nothing unless the virtual interval timer is armed (value > 0)
 * and some time was actually spent.  When the elapsed time reaches the
 * remaining value the timer fires: SIGVTALRM is sent and the re-arm
 * interval (it_virt_incr) is added back before the elapsed time is
 * subtracted, so a non-zero increment yields a periodic timer.
 */
static inline void account_it_virt(struct task_struct * p, cputime_t cputime)
{
cputime_t it_virt = p->it_virt_value;
if (cputime_gt(it_virt, cputime_zero) &&
cputime_gt(cputime, cputime_zero)) {
if (cputime_ge(cputime, it_virt)) {
/* Timer expired: signal and add the re-arm interval. */
it_virt = cputime_add(it_virt, p->it_virt_incr);
send_sig(SIGVTALRM, p, 1);
}
/* NOTE(review): if cputime exceeded it_virt + it_virt_incr this
 * subtraction wraps (cputime_t is unsigned) -- presumably callers
 * only ever pass one tick's worth of time; confirm. */
it_virt = cputime_sub(it_virt, cputime);
p->it_virt_value = it_virt;
}
}
/*
 * account_it_prof - charge cpu time against the ITIMER_PROF timer.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user and kernel space since the last update
 *
 * Mirror image of account_it_virt() for the profiling timer: when the
 * armed countdown is consumed, SIGPROF is sent and the timer is re-armed
 * with it_prof_incr (zero increment means one-shot).
 */
static void account_it_prof(struct task_struct *p, cputime_t cputime)
{
cputime_t it_prof = p->it_prof_value;
if (cputime_gt(it_prof, cputime_zero) &&
cputime_gt(cputime, cputime_zero)) {
if (cputime_ge(cputime, it_prof)) {
/* Timer expired: signal and add the re-arm interval. */
it_prof = cputime_add(it_prof, p->it_prof_incr);
send_sig(SIGPROF, p, 1);
}
/* Same unsigned-underflow caveat as account_it_virt(). */
it_prof = cputime_sub(it_prof, cputime);
p->it_prof_value = it_prof;
}
}
/*
 * Check if the process went over its cputime resource limit after
 * some cpu time got added to utime/stime.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user and kernel space since the last update
 *
 * Past the RLIMIT_CPU soft limit a SIGXCPU is delivered roughly once per
 * second; past the hard limit the task is killed with SIGKILL.
 * Callers must guarantee p->signal is valid (see account_system_time()).
 */
static void check_rlimit(struct task_struct *p, cputime_t cputime)
{
cputime_t total, tmp;
total = cputime_add(p->utime, p->stime);
/* rlimits are stored in seconds-as-jiffies; convert to cputime. */
tmp = jiffies_to_cputime(p->signal->rlim[RLIMIT_CPU].rlim_cur);
if (unlikely(cputime_gt(total, tmp))) {
/* Send SIGXCPU every second. */
/* Fire only when this update crossed a whole-second boundary:
 * compare the second count before (total - cputime) and after. */
tmp = cputime_sub(total, cputime);
if (cputime_to_secs(tmp) < cputime_to_secs(total))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
tmp = jiffies_to_cputime(p->signal->rlim[RLIMIT_CPU].rlim_max);
if (cputime_gt(total, tmp))
send_sig(SIGKILL, p, 1);
}
}
/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 *
 * Updates p->utime, runs the rlimit and itimer bookkeeping, and adds the
 * time to the per-cpu user (or nice) kernel_stat counter.
 *
 * NOTE(review): unlike account_system_time() this dereferences p->signal
 * (via check_rlimit/account_it_*) without a NULL/exit_state check --
 * presumably safe because a task executing user code cannot be in late
 * exit; confirm against release_task().
 */
void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
p->utime = cputime_add(p->utime, cputime);
/* Check for signals (SIGVTALRM, SIGPROF, SIGXCPU & SIGKILL). */
check_rlimit(p, cputime);
account_it_virt(p, cputime);
account_it_prof(p, cputime);
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
/* Positively niced tasks are accounted separately as "nice" time. */
if (TASK_NICE(p) > 0)
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
}
/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 *
 * Updates p->stime and classifies the time into exactly one per-cpu
 * kernel_stat bucket: irq, softirq, system, iowait or idle.
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
runqueue_t *rq = this_rq();
cputime64_t tmp;
p->stime = cputime_add(p->stime, cputime);
/* Check for signals (SIGPROF, SIGXCPU & SIGKILL). */
/* Guard against the late-exit race: another cpu may release_task()
 * (NULLing p->signal) once exit_state reaches EXIT_ZOMBIE, so skip
 * the p->signal-dereferencing bookkeeping then. */
if (likely(p->signal && p->exit_state < EXIT_ZOMBIE)) {
check_rlimit(p, cputime);
account_it_prof(p, cputime);
}
/* Add system time to cpustat. */
tmp = cputime_to_cputime64(cputime);
/* hardirq_offset discounts the timer irq context we are called from,
 * so only *nested* hardirq time lands in the irq bucket. */
if (hardirq_count() - hardirq_offset)
cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else if (p != rq->idle)
cpustat->system = cputime64_add(cpustat->system, tmp);
/* Idle task: split between iowait (tasks blocked on I/O) and idle. */
else if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
}
/*
 * Account for involuntary wait time.
 * @p: the process from which the cpu time has been stolen
 * @steal: the cpu time spent in involuntary wait
 *
 * Time the backing physical cpu spent away from this virtual cpu.
 * Stolen time observed while idle is folded into the system bucket;
 * only time stolen from a real task is reported as "steal" in
 * /proc/stat.
 */
void account_steal_time(struct task_struct *p, cputime_t steal)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t steal64 = cputime_to_cputime64(steal);
runqueue_t *rq = this_rq();
if (p == rq->idle)
cpustat->system = cputime64_add(cpustat->system, steal64);
else
cpustat->steal = cputime64_add(cpustat->steal, steal64);
}
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
......@@ -2257,42 +2399,20 @@ EXPORT_PER_CPU_SYMBOL(kstat);
* It also gets called by the fork code, when changing the parent's
* timeslices.
*/
void scheduler_tick(int user_ticks, int sys_ticks)
void scheduler_tick(void)
{
int cpu = smp_processor_id();
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
runqueue_t *rq = this_rq();
task_t *p = current;
rq->timestamp_last_tick = sched_clock();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_ticks);
/* note: this timer irq context must be accounted for as well */
if (hardirq_count() - HARDIRQ_OFFSET) {
cpustat->irq += sys_ticks;
sys_ticks = 0;
} else if (softirq_count()) {
cpustat->softirq += sys_ticks;
sys_ticks = 0;
}
if (p == rq->idle) {
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait += sys_ticks;
else
cpustat->idle += sys_ticks;
if (wake_priority_sleeper(rq))
goto out;
rebalance_tick(cpu, rq, SCHED_IDLE);
return;
}
if (TASK_NICE(p) > 0)
cpustat->nice += user_ticks;
else
cpustat->user += user_ticks;
cpustat->system += sys_ticks;
/* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) {
......
......@@ -375,8 +375,8 @@ void __exit_signal(struct task_struct *tsk)
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
sig->utime += tsk->utime;
sig->stime += tsk->stime;
sig->utime = cputime_add(sig->utime, tsk->utime);
sig->stime = cputime_add(sig->stime, tsk->stime);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
......@@ -1470,8 +1470,10 @@ void do_notify_parent(struct task_struct *tsk, int sig)
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
info.si_utime = tsk->utime + tsk->signal->utime;
info.si_stime = tsk->stime + tsk->signal->stime;
info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
tsk->signal->utime));
info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
tsk->signal->stime));
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
......@@ -1527,8 +1529,8 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
info.si_utime = tsk->utime;
info.si_stime = tsk->stime;
info.si_utime = cputime_to_jiffies(tsk->utime);
info.si_stime = cputime_to_jiffies(tsk->stime);
info.si_code = why;
switch (why) {
......
......@@ -893,15 +893,15 @@ asmlinkage long sys_times(struct tms __user * tbuf)
struct tms tmp;
struct task_struct *tsk = current;
struct task_struct *t;
unsigned long utime, stime, cutime, cstime;
cputime_t utime, stime, cutime, cstime;
read_lock(&tasklist_lock);
utime = tsk->signal->utime;
stime = tsk->signal->stime;
t = tsk;
do {
utime += t->utime;
stime += t->stime;
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != tsk);
......@@ -920,10 +920,10 @@ asmlinkage long sys_times(struct tms __user * tbuf)
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
tmp.tms_utime = jiffies_to_clock_t(utime);
tmp.tms_stime = jiffies_to_clock_t(stime);
tmp.tms_cutime = jiffies_to_clock_t(cutime);
tmp.tms_cstime = jiffies_to_clock_t(cstime);
tmp.tms_utime = cputime_to_clock_t(utime);
tmp.tms_stime = cputime_to_clock_t(stime);
tmp.tms_cutime = cputime_to_clock_t(cutime);
tmp.tms_cstime = cputime_to_clock_t(cstime);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
......@@ -1528,7 +1528,7 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
struct task_struct *t;
unsigned long flags;
unsigned long utime, stime;
cputime_t utime, stime;
memset((char *) r, 0, sizeof *r);
......@@ -1545,12 +1545,12 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
spin_unlock_irqrestore(&p->sighand->siglock, flags);
jiffies_to_timeval(utime, &r->ru_utime);
jiffies_to_timeval(stime, &r->ru_stime);
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
break;
case RUSAGE_SELF:
spin_lock_irqsave(&p->sighand->siglock, flags);
utime = stime = 0;
utime = stime = cputime_zero;
goto sum_group;
case RUSAGE_BOTH:
spin_lock_irqsave(&p->sighand->siglock, flags);
......@@ -1561,16 +1561,16 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
sum_group:
utime += p->signal->utime;
stime += p->signal->stime;
utime = cputime_add(utime, p->signal->utime);
stime = cputime_add(stime, p->signal->stime);
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
t = p;
do {
utime += t->utime;
stime += t->stime;
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
r->ru_nvcsw += t->nvcsw;
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
......@@ -1578,8 +1578,8 @@ void k_getrusage(struct task_struct *p, int who, struct rusage *r)
t = next_thread(t);
} while (t != p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
jiffies_to_timeval(utime, &r->ru_utime);
jiffies_to_timeval(stime, &r->ru_stime);
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
break;
default:
BUG();
......
......@@ -806,59 +806,6 @@ static void update_wall_time(unsigned long ticks)
} while (ticks);
}
static inline void do_process_times(struct task_struct *p,
unsigned long user, unsigned long system)
{
unsigned long psecs;
psecs = (p->utime += user);
psecs += (p->stime += system);
if (p->signal && !unlikely(p->exit_state) &&
psecs / HZ >= p->signal->rlim[RLIMIT_CPU].rlim_cur) {
/* Send SIGXCPU every second.. */
if (!(psecs % HZ))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
if (psecs / HZ >= p->signal->rlim[RLIMIT_CPU].rlim_max)
send_sig(SIGKILL, p, 1);
}
}
/*
 * Tick down the ITIMER_VIRTUAL countdown for @p by @ticks and deliver
 * SIGVTALRM when it expires, rearming from it_virt_incr (an incr of 0
 * leaves the timer disarmed, per setitimer semantics).
 *
 * Fix: the old code did an unconditional 'it_virt -= ticks' and fired
 * only on an exact zero result. Since it_virt is unsigned, ticks larger
 * than the remaining value wrapped it around to a huge count, silently
 * skipping the expiry. Treat any ticks >= remaining as an expiry instead.
 *
 * NOTE(review): like the other tick helpers, this trusts that p is not
 * being released concurrently when send_sig() runs — see changelog.
 */
static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			/* Timer expired (possibly overshot): signal and rearm. */
			it_virt = p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		} else
			it_virt -= ticks;
		p->it_virt_value = it_virt;
	}
}
/*
 * Tick down the ITIMER_PROF countdown for @p by exactly one tick and
 * deliver SIGPROF when it reaches zero, rearming from it_prof_incr
 * (an incr of 0 disarms the timer). A value of 0 means "not armed"
 * and is left untouched.
 */
static inline void do_it_prof(struct task_struct *p)
{
	unsigned long left = p->it_prof_value;

	if (!left)
		return;		/* timer not armed */

	if (--left == 0) {
		/* Expired: rearm (or disarm if incr is 0) and notify. */
		left = p->it_prof_incr;
		send_sig(SIGPROF, p, 1);
	}
	p->it_prof_value = left;
}
/*
 * Per-tick bookkeeping for the current task: charge user/system time
 * (with RLIMIT_CPU enforcement), then service the ITIMER_VIRTUAL and
 * ITIMER_PROF countdowns.
 *
 * Note: the 'system' and 'cpu' parameters are unused in this body;
 * 'system' time is charged via do_process_times()'s second argument and
 * 'cpu' is presumably kept for the caller's signature — TODO confirm.
 */
static void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu)
{
do_process_times(p, user, system);
/* ITIMER_VIRTUAL counts user time only; ITIMER_PROF counts every tick. */
do_it_virt(p, user);
do_it_prof(p);
}
/*
* Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
......@@ -866,11 +813,17 @@ static void update_one_process(struct task_struct *p, unsigned long user,
void update_process_times(int user_tick)
{
struct task_struct *p = current;
int cpu = smp_processor_id(), system = user_tick ^ 1;
int cpu = smp_processor_id();
update_one_process(p, user_tick, system, cpu);
/* Note: this timer irq context must be accounted for as well. */
if (user_tick)
account_user_time(p, jiffies_to_cputime(1));
else
account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
run_local_timers();
scheduler_tick(user_tick, system);
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
scheduler_tick();
}
/*
......
......@@ -61,7 +61,8 @@ static unsigned long badness(struct task_struct *p, unsigned long uptime)
* of seconds. There is no particular reason for this other than
* that it turned out to work very well in practice.
*/
cpu_time = (p->utime + p->stime) >> (SHIFT_HZ + 3);
cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
>> (SHIFT_HZ + 3);
if (uptime >= p->start_time.tv_sec)
run_time = (uptime - p->start_time.tv_sec) >> 10;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment