Commit 64861634 authored by Martin Schwidefsky

[S390] cputime: add sparse checking and cleanup

Make cputime_t and cputime64_t nocast to enable sparse checking to
detect incorrect use of cputime. Drop the cputime macros for simple
scalar operations. The conversion macros are still needed.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 55b02d2f
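Every hunk below applies the same pattern: with the typedefs marked __nocast, cputime values are added, subtracted and compared directly (which is why the cputime_add()/cputime_sub()/cputime_eq() style wrappers disappear), while each conversion to or from a plain integer becomes an explicit __force cast inside the remaining conversion helpers. The following sketch is an illustration only, not part of the patch; it inlines the annotation fallbacks the kernel normally gets from <linux/compiler.h> so that it stands alone, and with the tree checked by sparse (make C=1) it is conversions that bypass such __force casts that the checker can then flag.

/*
 * Illustration only -- not part of the commit.  The fallbacks below mirror
 * <linux/compiler.h> so the snippet builds with an ordinary compiler.
 */
#ifdef __CHECKER__
# define __nocast	__attribute__((nocast))
# define __force	__attribute__((force))
#else
# define __nocast
# define __force
#endif

typedef unsigned long long u64;		/* stand-in for <linux/types.h> */

typedef u64 __nocast cputime_t;

/* Deliberate conversions are made visible with __force casts ... */
static inline cputime_t usecs_to_cputime(unsigned int us)
{
	return (__force cputime_t)((u64) us * 4096ULL);	/* s390: 2**-12 usec units */
}

static inline u64 cputime_to_raw(cputime_t ct)
{
	return (__force u64) ct;
}

/* ... while plain arithmetic between cputime values no longer needs macros. */
static inline cputime_t combined_ticks(cputime_t utime, cputime_t stime)
{
	return utime + stime;			/* formerly cputime_add(utime, stime) */
}

For the s390 variant below, the constants follow from the comment at the top of that file: the CPU timer counts in units of 2**-12 microseconds, i.e. 4096 units per microsecond and 4096 * 1000000 = 4096000000 units per second, which is why cputime_to_usecs() reduces to a right shift by 12 and the second-based conversions multiply or divide by 4096000000ULL.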
@@ -26,59 +26,51 @@
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <asm/processor.h> #include <asm/processor.h>
typedef u64 cputime_t; typedef u64 __nocast cputime_t;
typedef u64 cputime64_t; typedef u64 __nocast cputime64_t;
#define cputime_zero ((cputime_t)0)
#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~((cputime_t)0) >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
#define cputime_div(__a, __n) ((__a) / (__n))
#define cputime_halve(__a) ((__a) >> 1)
#define cputime_eq(__a, __b) ((__a) == (__b))
#define cputime_gt(__a, __b) ((__a) > (__b))
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime64_zero ((cputime64_t)0)
#define cputime64_add(__a, __b) ((__a) + (__b))
#define cputime64_sub(__a, __b) ((__a) - (__b))
#define cputime_to_cputime64(__ct) (__ct)
/* /*
* Convert cputime <-> jiffies (HZ) * Convert cputime <-> jiffies (HZ)
*/ */
#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) #define cputime_to_jiffies(__ct) \
#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) #define jiffies_to_cputime(__jif) \
#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
#define cputime64_to_jiffies64(__ct) \
((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
#define jiffies64_to_cputime64(__jif) \
(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
/* /*
* Convert cputime <-> microseconds * Convert cputime <-> microseconds
*/ */
#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) #define cputime_to_usecs(__ct) \
#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) ((__force u64)(__ct) / NSEC_PER_USEC)
#define usecs_to_cputime(__usecs) \
(__force cputime_t)((__usecs) * NSEC_PER_USEC)
/* /*
* Convert cputime <-> seconds * Convert cputime <-> seconds
*/ */
#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC) #define cputime_to_secs(__ct) \
#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC) ((__force u64)(__ct) / NSEC_PER_SEC)
#define secs_to_cputime(__secs) \
(__force cputime_t)((__secs) * NSEC_PER_SEC)
/* /*
* Convert cputime <-> timespec (nsec) * Convert cputime <-> timespec (nsec)
*/ */
static inline cputime_t timespec_to_cputime(const struct timespec *val) static inline cputime_t timespec_to_cputime(const struct timespec *val)
{ {
cputime_t ret = val->tv_sec * NSEC_PER_SEC; u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (ret + val->tv_nsec); return (__force cputime_t) ret;
} }
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
{ {
val->tv_sec = ct / NSEC_PER_SEC; val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
val->tv_nsec = ct % NSEC_PER_SEC; val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
} }
/* /*
@@ -86,25 +78,28 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
*/ */
static inline cputime_t timeval_to_cputime(struct timeval *val) static inline cputime_t timeval_to_cputime(struct timeval *val)
{ {
cputime_t ret = val->tv_sec * NSEC_PER_SEC; u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
return (ret + val->tv_usec * NSEC_PER_USEC); return (__force cputime_t) ret;
} }
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
{ {
val->tv_sec = ct / NSEC_PER_SEC; val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC; val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
} }
/* /*
* Convert cputime <-> clock (USER_HZ) * Convert cputime <-> clock (USER_HZ)
*/ */
#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ)) #define cputime_to_clock_t(__ct) \
#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ)) ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
#define clock_t_to_cputime(__x) \
(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
/* /*
* Convert cputime64 to clock. * Convert cputime64 to clock.
*/ */
#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct) #define cputime64_to_clock_t(__ct) \
cputime_to_clock_t((__force cputime_t)__ct)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */ #endif /* __IA64_CPUTIME_H */
@@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { }
#include <asm/time.h> #include <asm/time.h>
#include <asm/param.h> #include <asm/param.h>
typedef u64 cputime_t; typedef u64 __nocast cputime_t;
typedef u64 cputime64_t; typedef u64 __nocast cputime64_t;
#define cputime_zero ((cputime_t)0)
#define cputime_max ((~((cputime_t)0) >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
#define cputime_div(__a, __n) ((__a) / (__n))
#define cputime_halve(__a) ((__a) >> 1)
#define cputime_eq(__a, __b) ((__a) == (__b))
#define cputime_gt(__a, __b) ((__a) > (__b))
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime64_zero ((cputime64_t)0)
#define cputime64_add(__a, __b) ((__a) + (__b))
#define cputime64_sub(__a, __b) ((__a) - (__b))
#define cputime_to_cputime64(__ct) (__ct)
#ifdef __KERNEL__ #ifdef __KERNEL__
@@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static inline unsigned long cputime_to_jiffies(const cputime_t ct) static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{ {
return mulhdu(ct, __cputime_jiffies_factor); return mulhdu((__force u64) ct, __cputime_jiffies_factor);
} }
/* Estimate the scaled cputime by scaling the real cputime based on /* Estimate the scaled cputime by scaling the real cputime based on
@@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct)
{ {
if (cpu_has_feature(CPU_FTR_SPURR) && if (cpu_has_feature(CPU_FTR_SPURR) &&
__get_cpu_var(cputime_last_delta)) __get_cpu_var(cputime_last_delta))
return ct * __get_cpu_var(cputime_scaled_last_delta) / return (__force u64) ct *
__get_cpu_var(cputime_scaled_last_delta) /
__get_cpu_var(cputime_last_delta); __get_cpu_var(cputime_last_delta);
return ct; return ct;
} }
static inline cputime_t jiffies_to_cputime(const unsigned long jif) static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{ {
cputime_t ct; u64 ct;
unsigned long sec; unsigned long sec;
/* have to be a little careful about overflow */ /* have to be a little careful about overflow */
@@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
} }
if (sec) if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec; ct += (cputime_t) sec * tb_ticks_per_sec;
return ct; return (__force cputime_t) ct;
} }
static inline void setup_cputime_one_jiffy(void) static inline void setup_cputime_one_jiffy(void)
@@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void)
static inline cputime64_t jiffies64_to_cputime64(const u64 jif) static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{ {
cputime_t ct; u64 ct;
u64 sec; u64 sec;
/* have to be a little careful about overflow */ /* have to be a little careful about overflow */
@@ -114,13 +98,13 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
do_div(ct, HZ); do_div(ct, HZ);
} }
if (sec) if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec; ct += (u64) sec * tb_ticks_per_sec;
return ct; return (__force cputime64_t) ct;
} }
static inline u64 cputime64_to_jiffies64(const cputime_t ct) static inline u64 cputime64_to_jiffies64(const cputime_t ct)
{ {
return mulhdu(ct, __cputime_jiffies_factor); return mulhdu((__force u64) ct, __cputime_jiffies_factor);
} }
/* /*
@@ -130,12 +114,12 @@ extern u64 __cputime_msec_factor;
static inline unsigned long cputime_to_usecs(const cputime_t ct) static inline unsigned long cputime_to_usecs(const cputime_t ct)
{ {
return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; return mulhdu((__force u64) ct, __cputime_msec_factor) * USEC_PER_MSEC;
} }
static inline cputime_t usecs_to_cputime(const unsigned long us) static inline cputime_t usecs_to_cputime(const unsigned long us)
{ {
cputime_t ct; u64 ct;
unsigned long sec; unsigned long sec;
/* have to be a little careful about overflow */ /* have to be a little careful about overflow */
@@ -147,7 +131,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
} }
if (sec) if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec; ct += (cputime_t) sec * tb_ticks_per_sec;
return ct; return (__force cputime_t) ct;
} }
/* /*
@@ -157,12 +141,12 @@ extern u64 __cputime_sec_factor;
static inline unsigned long cputime_to_secs(const cputime_t ct) static inline unsigned long cputime_to_secs(const cputime_t ct)
{ {
return mulhdu(ct, __cputime_sec_factor); return mulhdu((__force u64) ct, __cputime_sec_factor);
} }
static inline cputime_t secs_to_cputime(const unsigned long sec) static inline cputime_t secs_to_cputime(const unsigned long sec)
{ {
return (cputime_t) sec * tb_ticks_per_sec; return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
} }
/* /*
@@ -170,7 +154,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec)
*/ */
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
{ {
u64 x = ct; u64 x = (__force u64) ct;
unsigned int frac; unsigned int frac;
frac = do_div(x, tb_ticks_per_sec); frac = do_div(x, tb_ticks_per_sec);
@@ -182,11 +166,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
static inline cputime_t timespec_to_cputime(const struct timespec *p) static inline cputime_t timespec_to_cputime(const struct timespec *p)
{ {
cputime_t ct; u64 ct;
ct = (u64) p->tv_nsec * tb_ticks_per_sec; ct = (u64) p->tv_nsec * tb_ticks_per_sec;
do_div(ct, 1000000000); do_div(ct, 1000000000);
return ct + (u64) p->tv_sec * tb_ticks_per_sec; return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
} }
/* /*
@@ -194,7 +178,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p)
*/ */
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
{ {
u64 x = ct; u64 x = (__force u64) ct;
unsigned int frac; unsigned int frac;
frac = do_div(x, tb_ticks_per_sec); frac = do_div(x, tb_ticks_per_sec);
@@ -206,11 +190,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
static inline cputime_t timeval_to_cputime(const struct timeval *p) static inline cputime_t timeval_to_cputime(const struct timeval *p)
{ {
cputime_t ct; u64 ct;
ct = (u64) p->tv_usec * tb_ticks_per_sec; ct = (u64) p->tv_usec * tb_ticks_per_sec;
do_div(ct, 1000000); do_div(ct, 1000000);
return ct + (u64) p->tv_sec * tb_ticks_per_sec; return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
} }
/* /*
@@ -220,12 +204,12 @@ extern u64 __cputime_clockt_factor;
static inline unsigned long cputime_to_clock_t(const cputime_t ct) static inline unsigned long cputime_to_clock_t(const cputime_t ct)
{ {
return mulhdu(ct, __cputime_clockt_factor); return mulhdu((__force u64) ct, __cputime_clockt_factor);
} }
static inline cputime_t clock_t_to_cputime(const unsigned long clk) static inline cputime_t clock_t_to_cputime(const unsigned long clk)
{ {
cputime_t ct; u64 ct;
unsigned long sec; unsigned long sec;
/* have to be a little careful about overflow */ /* have to be a little careful about overflow */
@@ -236,8 +220,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
do_div(ct, USER_HZ); do_div(ct, USER_HZ);
} }
if (sec) if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec; ct += (u64) sec * tb_ticks_per_sec;
return ct; return (__force cputime_t) ct;
} }
#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
@@ -16,114 +16,98 @@
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
typedef unsigned long long cputime_t; typedef unsigned long long __nocast cputime_t;
typedef unsigned long long cputime64_t; typedef unsigned long long __nocast cputime64_t;
#ifndef __s390x__ static inline unsigned long __div(unsigned long long n, unsigned long base)
static inline unsigned int
__div(unsigned long long n, unsigned int base)
{ {
#ifndef __s390x__
register_pair rp; register_pair rp;
rp.pair = n >> 1; rp.pair = n >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
return rp.subreg.odd; return rp.subreg.odd;
#else /* __s390x__ */
return n / base;
#endif /* __s390x__ */
} }
#else /* __s390x__ */ #define cputime_one_jiffy jiffies_to_cputime(1)
static inline unsigned int /*
__div(unsigned long long n, unsigned int base) * Convert cputime to jiffies and back.
*/
static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
{ {
return n / base; return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
} }
#endif /* __s390x__ */ static inline cputime_t jiffies_to_cputime(const unsigned int jif)
{
return (__force cputime_t)(jif * (4096000000ULL / HZ));
}
#define cputime_zero (0ULL) static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
#define cputime_one_jiffy jiffies_to_cputime(1) {
#define cputime_max ((~0UL >> 1) - 1) unsigned long long jif = (__force unsigned long long) cputime;
#define cputime_add(__a, __b) ((__a) + (__b)) do_div(jif, 4096000000ULL / HZ);
#define cputime_sub(__a, __b) ((__a) - (__b)) return jif;
#define cputime_div(__a, __n) ({ \ }
unsigned long long __div = (__a); \
do_div(__div,__n); \ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
__div; \ {
}) return (__force cputime64_t)(jif * (4096000000ULL / HZ));
#define cputime_halve(__a) ((__a) >> 1)
#define cputime_eq(__a, __b) ((__a) == (__b))
#define cputime_gt(__a, __b) ((__a) > (__b))
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
#define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b))
#define cputime_to_cputime64(__ct) (__ct)
static inline u64
cputime64_to_jiffies64(cputime64_t cputime)
{
do_div(cputime, 4096000000ULL / HZ);
return cputime;
} }
/* /*
* Convert cputime to microseconds and back. * Convert cputime to microseconds and back.
*/ */
static inline unsigned int static inline unsigned int cputime_to_usecs(const cputime_t cputime)
cputime_to_usecs(const cputime_t cputime)
{ {
return cputime_div(cputime, 4096); return (__force unsigned long long) cputime >> 12;
} }
static inline cputime_t static inline cputime_t usecs_to_cputime(const unsigned int m)
usecs_to_cputime(const unsigned int m)
{ {
return (cputime_t) m * 4096; return (__force cputime_t)(m * 4096ULL);
} }
/* /*
* Convert cputime to milliseconds and back. * Convert cputime to milliseconds and back.
*/ */
static inline unsigned int static inline unsigned int cputime_to_secs(const cputime_t cputime)
cputime_to_secs(const cputime_t cputime)
{ {
return __div(cputime, 2048000000) >> 1; return __div((__force unsigned long long) cputime, 2048000000) >> 1;
} }
static inline cputime_t static inline cputime_t secs_to_cputime(const unsigned int s)
secs_to_cputime(const unsigned int s)
{ {
return (cputime_t) s * 4096000000ULL; return (__force cputime_t)(s * 4096000000ULL);
} }
/* /*
* Convert cputime to timespec and back. * Convert cputime to timespec and back.
*/ */
static inline cputime_t static inline cputime_t timespec_to_cputime(const struct timespec *value)
timespec_to_cputime(const struct timespec *value)
{ {
return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL; unsigned long long ret = value->tv_sec * 4096000000ULL;
return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
} }
static inline void static inline void cputime_to_timespec(const cputime_t cputime,
cputime_to_timespec(const cputime_t cputime, struct timespec *value) struct timespec *value)
{ {
unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__ #ifndef __s390x__
register_pair rp; register_pair rp;
rp.pair = cputime >> 1; rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_nsec = rp.subreg.even * 1000 / 4096; value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd; value->tv_sec = rp.subreg.odd;
#else #else
value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096; value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
value->tv_sec = cputime / 4096000000ULL; value->tv_sec = __cputime / 4096000000ULL;
#endif #endif
} }
@@ -132,50 +116,52 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
* Since cputime and timeval have the same resolution (microseconds) * Since cputime and timeval have the same resolution (microseconds)
* this is easy. * this is easy.
*/ */
static inline cputime_t static inline cputime_t timeval_to_cputime(const struct timeval *value)
timeval_to_cputime(const struct timeval *value)
{ {
return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL; unsigned long long ret = value->tv_sec * 4096000000ULL;
return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
} }
static inline void static inline void cputime_to_timeval(const cputime_t cputime,
cputime_to_timeval(const cputime_t cputime, struct timeval *value) struct timeval *value)
{ {
unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__ #ifndef __s390x__
register_pair rp; register_pair rp;
rp.pair = cputime >> 1; rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_usec = rp.subreg.even / 4096; value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd; value->tv_sec = rp.subreg.odd;
#else #else
value->tv_usec = (cputime % 4096000000ULL) / 4096; value->tv_usec = (__cputime % 4096000000ULL) / 4096;
value->tv_sec = cputime / 4096000000ULL; value->tv_sec = __cputime / 4096000000ULL;
#endif #endif
} }
/* /*
* Convert cputime to clock and back. * Convert cputime to clock and back.
*/ */
static inline clock_t static inline clock_t cputime_to_clock_t(cputime_t cputime)
cputime_to_clock_t(cputime_t cputime)
{ {
return cputime_div(cputime, 4096000000ULL / USER_HZ); unsigned long long clock = (__force unsigned long long) cputime;
do_div(clock, 4096000000ULL / USER_HZ);
return clock;
} }
static inline cputime_t static inline cputime_t clock_t_to_cputime(unsigned long x)
clock_t_to_cputime(unsigned long x)
{ {
return (cputime_t) x * (4096000000ULL / USER_HZ); return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
} }
/* /*
* Convert cputime64 to clock. * Convert cputime64 to clock.
*/ */
static inline clock_t static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
cputime64_to_clock_t(cputime64_t cputime)
{ {
return cputime_div(cputime, 4096000000ULL / USER_HZ); unsigned long long clock = (__force unsigned long long) cputime;
do_div(clock, 4096000000ULL / USER_HZ);
return clock;
} }
struct s390_idle_data { struct s390_idle_data {
@@ -103,15 +103,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
cputime64_t busy_time; cputime64_t busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, busy_time = kstat_cpu(cpu).cpustat.user;
kstat_cpu(cpu).cpustat.system); busy_time += kstat_cpu(cpu).cpustat.system;
busy_time += kstat_cpu(cpu).cpustat.irq;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); busy_time += kstat_cpu(cpu).cpustat.softirq;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); busy_time += kstat_cpu(cpu).cpustat.steal;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); busy_time += kstat_cpu(cpu).cpustat.nice;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
idle_time = cur_wall_time - busy_time;
idle_time = cputime64_sub(cur_wall_time, busy_time);
if (wall) if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
wall_time = (unsigned int) cputime64_sub(cur_wall_time, wall_time = (unsigned int)
j_dbs_info->prev_cpu_wall); (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time; j_dbs_info->prev_cpu_wall = cur_wall_time;
idle_time = (unsigned int) cputime64_sub(cur_idle_time, idle_time = (unsigned int)
j_dbs_info->prev_cpu_idle); (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time; j_dbs_info->prev_cpu_idle = cur_idle_time;
if (dbs_tuners_ins.ignore_nice) { if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice; cputime64_t cur_nice;
unsigned long cur_nice_jiffies; unsigned long cur_nice_jiffies;
cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, cur_nice = kstat_cpu(j).cpustat.nice -
j_dbs_info->prev_cpu_nice); j_dbs_info->prev_cpu_nice;
/* /*
* Assumption: nice time between sampling periods will * Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys * be less than 2^32 jiffies for 32 bit sys
@@ -127,15 +127,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
cputime64_t busy_time; cputime64_t busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, busy_time = kstat_cpu(cpu).cpustat.user;
kstat_cpu(cpu).cpustat.system); busy_time += kstat_cpu(cpu).cpustat.system;
busy_time += kstat_cpu(cpu).cpustat.irq;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); busy_time += kstat_cpu(cpu).cpustat.softirq;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); busy_time += kstat_cpu(cpu).cpustat.steal;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); busy_time += kstat_cpu(cpu).cpustat.nice;
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
idle_time = cur_wall_time - busy_time;
idle_time = cputime64_sub(cur_wall_time, busy_time);
if (wall) if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
wall_time = (unsigned int) cputime64_sub(cur_wall_time, wall_time = (unsigned int)
j_dbs_info->prev_cpu_wall); (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time; j_dbs_info->prev_cpu_wall = cur_wall_time;
idle_time = (unsigned int) cputime64_sub(cur_idle_time, idle_time = (unsigned int)
j_dbs_info->prev_cpu_idle); (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time; j_dbs_info->prev_cpu_idle = cur_idle_time;
iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, iowait_time = (unsigned int)
j_dbs_info->prev_cpu_iowait); (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
j_dbs_info->prev_cpu_iowait = cur_iowait_time; j_dbs_info->prev_cpu_iowait = cur_iowait_time;
if (dbs_tuners_ins.ignore_nice) { if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice; cputime64_t cur_nice;
unsigned long cur_nice_jiffies; unsigned long cur_nice_jiffies;
cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, cur_nice = kstat_cpu(j).cpustat.nice -
j_dbs_info->prev_cpu_nice); j_dbs_info->prev_cpu_nice;
/* /*
* Assumption: nice time between sampling periods will * Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys * be less than 2^32 jiffies for 32 bit sys
@@ -61,9 +61,8 @@ static int cpufreq_stats_update(unsigned int cpu)
spin_lock(&cpufreq_stats_lock); spin_lock(&cpufreq_stats_lock);
stat = per_cpu(cpufreq_stats_table, cpu); stat = per_cpu(cpufreq_stats_table, cpu);
if (stat->time_in_state) if (stat->time_in_state)
stat->time_in_state[stat->last_index] = stat->time_in_state[stat->last_index] +=
cputime64_add(stat->time_in_state[stat->last_index], cur_time - stat->last_time;
cputime_sub(cur_time, stat->last_time));
stat->last_time = cur_time; stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock); spin_unlock(&cpufreq_stats_lock);
return 0; return 0;
@@ -83,11 +83,10 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{ {
cputime64_t retval; cputime64_t retval;
retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, retval = kstat_cpu(cpu).cpustat.idle + kstat_cpu(cpu).cpustat.iowait;
kstat_cpu(cpu).cpustat.iowait);
if (rackmeter_ignore_nice) if (rackmeter_ignore_nice)
retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); retval += kstat_cpu(cpu).cpustat.nice;
return retval; return retval;
} }
@@ -220,13 +219,11 @@ static void rackmeter_do_timer(struct work_struct *work)
int i, offset, load, cumm, pause; int i, offset, load, cumm, pause;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
total_ticks = (unsigned int)cputime64_sub(cur_jiffies, total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
rcpu->prev_wall);
rcpu->prev_wall = cur_jiffies; rcpu->prev_wall = cur_jiffies;
total_idle_ticks = get_cpu_idle_time(cpu); total_idle_ticks = get_cpu_idle_time(cpu);
idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks, idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
rcpu->prev_idle);
rcpu->prev_idle = total_idle_ticks; rcpu->prev_idle = total_idle_ticks;
/* We do a very dumb calculation to update the LEDs for now, /* We do a very dumb calculation to update the LEDs for now,
@@ -394,8 +394,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
sigemptyset(&sigign); sigemptyset(&sigign);
sigemptyset(&sigcatch); sigemptyset(&sigcatch);
cutime = cstime = utime = stime = cputime_zero; cutime = cstime = utime = stime = 0;
cgtime = gtime = cputime_zero; cgtime = gtime = 0;
if (lock_task_sighand(task, &flags)) { if (lock_task_sighand(task, &flags)) {
struct signal_struct *sig = task->signal; struct signal_struct *sig = task->signal;
@@ -423,14 +423,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
do { do {
min_flt += t->min_flt; min_flt += t->min_flt;
maj_flt += t->maj_flt; maj_flt += t->maj_flt;
gtime = cputime_add(gtime, t->gtime); gtime += t->gtime;
t = next_thread(t); t = next_thread(t);
} while (t != task); } while (t != task);
min_flt += sig->min_flt; min_flt += sig->min_flt;
maj_flt += sig->maj_flt; maj_flt += sig->maj_flt;
thread_group_times(task, &utime, &stime); thread_group_times(task, &utime, &stime);
gtime = cputime_add(gtime, sig->gtime); gtime += sig->gtime;
} }
sid = task_session_nr_ns(task, ns); sid = task_session_nr_ns(task, ns);
@@ -30,7 +30,7 @@ static cputime64_t get_idle_time(int cpu)
if (idle_time == -1ULL) { if (idle_time == -1ULL) {
/* !NO_HZ so we can rely on cpustat.idle */ /* !NO_HZ so we can rely on cpustat.idle */
idle = kstat_cpu(cpu).cpustat.idle; idle = kstat_cpu(cpu).cpustat.idle;
idle = cputime64_add(idle, arch_idle_time(cpu)); idle += arch_idle_time(cpu);
} else } else
idle = nsecs_to_jiffies64(1000 * idle_time); idle = nsecs_to_jiffies64(1000 * idle_time);
@@ -63,23 +63,22 @@ static int show_stat(struct seq_file *p, void *v)
struct timespec boottime; struct timespec boottime;
user = nice = system = idle = iowait = user = nice = system = idle = iowait =
irq = softirq = steal = cputime64_zero; irq = softirq = steal = 0;
guest = guest_nice = cputime64_zero; guest = guest_nice = 0;
getboottime(&boottime); getboottime(&boottime);
jif = boottime.tv_sec; jif = boottime.tv_sec;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
user = cputime64_add(user, kstat_cpu(i).cpustat.user); user += kstat_cpu(i).cpustat.user;
nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); nice += kstat_cpu(i).cpustat.nice;
system = cputime64_add(system, kstat_cpu(i).cpustat.system); system += kstat_cpu(i).cpustat.system;
idle = cputime64_add(idle, get_idle_time(i)); idle += get_idle_time(i);
iowait = cputime64_add(iowait, get_iowait_time(i)); iowait += get_iowait_time(i);
irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); irq += kstat_cpu(i).cpustat.irq;
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); softirq += kstat_cpu(i).cpustat.softirq;
steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); steal += kstat_cpu(i).cpustat.steal;
guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); guest += kstat_cpu(i).cpustat.guest;
guest_nice = cputime64_add(guest_nice, guest_nice += kstat_cpu(i).cpustat.guest_nice;
kstat_cpu(i).cpustat.guest_nice);
sum += kstat_cpu_irqs_sum(i); sum += kstat_cpu_irqs_sum(i);
sum += arch_irq_stat_cpu(i); sum += arch_irq_stat_cpu(i);
@@ -12,10 +12,10 @@ static int uptime_proc_show(struct seq_file *m, void *v)
struct timespec uptime; struct timespec uptime;
struct timespec idle; struct timespec idle;
int i; int i;
cputime_t idletime = cputime_zero; cputime_t idletime = 0;
for_each_possible_cpu(i) for_each_possible_cpu(i)
idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); idletime += kstat_cpu(i).cpustat.idle;
do_posix_clock_monotonic_gettime(&uptime); do_posix_clock_monotonic_gettime(&uptime);
monotonic_to_bootbased(&uptime); monotonic_to_bootbased(&uptime);
@@ -4,70 +4,64 @@
#include <linux/time.h> #include <linux/time.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
typedef unsigned long cputime_t; typedef unsigned long __nocast cputime_t;
#define cputime_zero (0UL)
#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~0UL >> 1) - 1) #define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
#define cputime_div(__a, __n) ((__a) / (__n))
#define cputime_halve(__a) ((__a) >> 1)
#define cputime_eq(__a, __b) ((__a) == (__b))
#define cputime_gt(__a, __b) ((__a) > (__b))
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime_to_jiffies(__ct) (__ct)
#define cputime_to_scaled(__ct) (__ct) #define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) (__hz) #define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
typedef u64 cputime64_t; typedef u64 __nocast cputime64_t;
#define cputime64_zero (0ULL) #define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
#define cputime64_add(__a, __b) ((__a) + (__b)) #define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
#define cputime64_sub(__a, __b) ((__a) - (__b))
#define cputime64_to_jiffies64(__ct) (__ct)
#define jiffies64_to_cputime64(__jif) (__jif)
#define cputime_to_cputime64(__ct) ((u64) __ct)
#define cputime64_gt(__a, __b) ((__a) > (__b))
#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct) #define nsecs_to_cputime64(__ct) \
jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
/* /*
* Convert cputime to microseconds and back. * Convert cputime to microseconds and back.
*/ */
#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) #define cputime_to_usecs(__ct) \
#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) jiffies_to_usecs(cputime_to_jiffies(__ct));
#define usecs_to_cputime(__msecs) \
jiffies_to_cputime(usecs_to_jiffies(__msecs));
/* /*
* Convert cputime to seconds and back. * Convert cputime to seconds and back.
*/ */
#define cputime_to_secs(jif) ((jif) / HZ) #define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
#define secs_to_cputime(sec) ((sec) * HZ) #define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
/* /*
* Convert cputime to timespec and back. * Convert cputime to timespec and back.
*/ */
#define timespec_to_cputime(__val) timespec_to_jiffies(__val) #define timespec_to_cputime(__val) \
#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val) jiffies_to_cputime(timespec_to_jiffies(__val))
#define cputime_to_timespec(__ct,__val) \
jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
/* /*
* Convert cputime to timeval and back. * Convert cputime to timeval and back.
*/ */
#define timeval_to_cputime(__val) timeval_to_jiffies(__val) #define timeval_to_cputime(__val) \
#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val) jiffies_to_cputime(timeval_to_jiffies(__val))
#define cputime_to_timeval(__ct,__val) \
jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
/* /*
* Convert cputime to clock and back. * Convert cputime to clock and back.
*/ */
#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct) #define cputime_to_clock_t(__ct) \
#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x) jiffies_to_clock_t(cputime_to_jiffies(__ct))
#define clock_t_to_cputime(__x) \
jiffies_to_cputime(clock_t_to_jiffies(__x))
/* /*
* Convert cputime64 to clock. * Convert cputime64 to clock.
*/ */
#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct) #define cputime64_to_clock_t(__ct) \
jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
#endif #endif
@@ -483,8 +483,8 @@ struct task_cputime {
#define INIT_CPUTIME \ #define INIT_CPUTIME \
(struct task_cputime) { \ (struct task_cputime) { \
.utime = cputime_zero, \ .utime = 0, \
.stime = cputime_zero, \ .stime = 0, \
.sum_exec_runtime = 0, \ .sum_exec_runtime = 0, \
} }
@@ -613,8 +613,8 @@ void acct_collect(long exitcode, int group_dead)
pacct->ac_flag |= ACORE; pacct->ac_flag |= ACORE;
if (current->flags & PF_SIGNALED) if (current->flags & PF_SIGNALED)
pacct->ac_flag |= AXSIG; pacct->ac_flag |= AXSIG;
pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime); pacct->ac_utime += current->utime;
pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime); pacct->ac_stime += current->stime;
pacct->ac_minflt += current->min_flt; pacct->ac_minflt += current->min_flt;
pacct->ac_majflt += current->maj_flt; pacct->ac_majflt += current->maj_flt;
spin_unlock_irq(&current->sighand->siglock); spin_unlock_irq(&current->sighand->siglock);
@@ -178,8 +178,7 @@ static inline void check_for_tasks(int cpu)
write_lock_irq(&tasklist_lock); write_lock_irq(&tasklist_lock);
for_each_process(p) { for_each_process(p) {
if (task_cpu(p) == cpu && p->state == TASK_RUNNING && if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
(!cputime_eq(p->utime, cputime_zero) || (p->utime || p->stime))
!cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
"(state = %ld, flags = %x)\n", "(state = %ld, flags = %x)\n",
p->comm, task_pid_nr(p), cpu, p->comm, task_pid_nr(p), cpu,
@@ -121,9 +121,9 @@ static void __exit_signal(struct task_struct *tsk)
* We won't ever get here for the group leader, since it * We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct. * will have been the last reference on the signal_struct.
*/ */
sig->utime = cputime_add(sig->utime, tsk->utime); sig->utime += tsk->utime;
sig->stime = cputime_add(sig->stime, tsk->stime); sig->stime += tsk->stime;
sig->gtime = cputime_add(sig->gtime, tsk->gtime); sig->gtime += tsk->gtime;
sig->min_flt += tsk->min_flt; sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt; sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw; sig->nvcsw += tsk->nvcsw;
@@ -1255,19 +1255,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
spin_lock_irq(&p->real_parent->sighand->siglock); spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal; psig = p->real_parent->signal;
sig = p->signal; sig = p->signal;
psig->cutime = psig->cutime += tgutime + sig->cutime;
cputime_add(psig->cutime, psig->cstime += tgstime + sig->cstime;
cputime_add(tgutime, psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
sig->cutime));
psig->cstime =
cputime_add(psig->cstime,
cputime_add(tgstime,
sig->cstime));
psig->cgtime =
cputime_add(psig->cgtime,
cputime_add(p->gtime,
cputime_add(sig->gtime,
sig->cgtime)));
psig->cmin_flt += psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt; p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt += psig->cmaj_flt +=
@@ -1023,8 +1023,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
*/ */
static void posix_cpu_timers_init(struct task_struct *tsk) static void posix_cpu_timers_init(struct task_struct *tsk)
{ {
tsk->cputime_expires.prof_exp = cputime_zero; tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = cputime_zero; tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0; tsk->cputime_expires.sched_exp = 0;
INIT_LIST_HEAD(&tsk->cpu_timers[0]); INIT_LIST_HEAD(&tsk->cpu_timers[0]);
INIT_LIST_HEAD(&tsk->cpu_timers[1]); INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1132,14 +1132,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
init_sigpending(&p->pending); init_sigpending(&p->pending);
p->utime = cputime_zero; p->utime = p->stime = p->gtime = 0;
p->stime = cputime_zero; p->utimescaled = p->stimescaled = 0;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING #ifndef CONFIG_VIRT_CPU_ACCOUNTING
p->prev_utime = cputime_zero; p->prev_utime = p->prev_stime = 0;
p->prev_stime = cputime_zero;
#endif #endif
#if defined(SPLIT_RSS_COUNTING) #if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat)); memset(&p->rss_stat, 0, sizeof(p->rss_stat));
@@ -52,22 +52,22 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
cval = it->expires; cval = it->expires;
cinterval = it->incr; cinterval = it->incr;
if (!cputime_eq(cval, cputime_zero)) { if (cval) {
struct task_cputime cputime; struct task_cputime cputime;
cputime_t t; cputime_t t;
thread_group_cputimer(tsk, &cputime); thread_group_cputimer(tsk, &cputime);
if (clock_id == CPUCLOCK_PROF) if (clock_id == CPUCLOCK_PROF)
t = cputime_add(cputime.utime, cputime.stime); t = cputime.utime + cputime.stime;
else else
/* CPUCLOCK_VIRT */ /* CPUCLOCK_VIRT */
t = cputime.utime; t = cputime.utime;
if (cputime_le(cval, t)) if (cval < t)
/* about to fire */ /* about to fire */
cval = cputime_one_jiffy; cval = cputime_one_jiffy;
else else
cval = cputime_sub(cval, t); cval = cval - t;
} }
spin_unlock_irq(&tsk->sighand->siglock); spin_unlock_irq(&tsk->sighand->siglock);
@@ -161,10 +161,9 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
cval = it->expires; cval = it->expires;
cinterval = it->incr; cinterval = it->incr;
if (!cputime_eq(cval, cputime_zero) || if (cval || nval) {
!cputime_eq(nval, cputime_zero)) { if (nval > 0)
if (cputime_gt(nval, cputime_zero)) nval += cputime_one_jiffy;
nval = cputime_add(nval, cputime_one_jiffy);
set_process_cpu_timer(tsk, clock_id, &nval, &cval); set_process_cpu_timer(tsk, clock_id, &nval, &cval);
} }
it->expires = nval; it->expires = nval;
@@ -78,7 +78,7 @@ static inline int cpu_time_before(const clockid_t which_clock,
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
return now.sched < then.sched; return now.sched < then.sched;
} else { } else {
return cputime_lt(now.cpu, then.cpu); return now.cpu < then.cpu;
} }
} }
static inline void cpu_time_add(const clockid_t which_clock, static inline void cpu_time_add(const clockid_t which_clock,
@@ -88,7 +88,7 @@ static inline void cpu_time_add(const clockid_t which_clock,
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
acc->sched += val.sched; acc->sched += val.sched;
} else { } else {
acc->cpu = cputime_add(acc->cpu, val.cpu); acc->cpu += val.cpu;
} }
} }
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
@@ -98,24 +98,11 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
a.sched -= b.sched; a.sched -= b.sched;
} else { } else {
a.cpu = cputime_sub(a.cpu, b.cpu); a.cpu -= b.cpu;
} }
return a; return a;
} }
/*
* Divide and limit the result to res >= 1
*
* This is necessary to prevent signal delivery starvation, when the result of
* the division would be rounded down to 0.
*/
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
cputime_t res = cputime_div(time, div);
return max_t(cputime_t, res, 1);
}
/* /*
* Update expiry time from increment, and increase overrun count, * Update expiry time from increment, and increase overrun count,
* given the current clock sample. * given the current clock sample.
@@ -148,28 +135,26 @@ static void bump_cpu_timer(struct k_itimer *timer,
} else { } else {
cputime_t delta, incr; cputime_t delta, incr;
if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu)) if (now.cpu < timer->it.cpu.expires.cpu)
return; return;
incr = timer->it.cpu.incr.cpu; incr = timer->it.cpu.incr.cpu;
delta = cputime_sub(cputime_add(now.cpu, incr), delta = now.cpu + incr - timer->it.cpu.expires.cpu;
timer->it.cpu.expires.cpu);
/* Don't use (incr*2 < delta), incr*2 might overflow. */ /* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++) for (i = 0; incr < delta - incr; i++)
incr = cputime_add(incr, incr); incr += incr;
for (; i >= 0; incr = cputime_halve(incr), i--) { for (; i >= 0; incr = incr >> 1, i--) {
if (cputime_lt(delta, incr)) if (delta < incr)
continue; continue;
timer->it.cpu.expires.cpu = timer->it.cpu.expires.cpu += incr;
cputime_add(timer->it.cpu.expires.cpu, incr);
timer->it_overrun += 1 << i; timer->it_overrun += 1 << i;
delta = cputime_sub(delta, incr); delta -= incr;
} }
} }
} }
static inline cputime_t prof_ticks(struct task_struct *p) static inline cputime_t prof_ticks(struct task_struct *p)
{ {
return cputime_add(p->utime, p->stime); return p->utime + p->stime;
} }
static inline cputime_t virt_ticks(struct task_struct *p) static inline cputime_t virt_ticks(struct task_struct *p)
{ {
@@ -248,8 +233,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
t = tsk; t = tsk;
do { do {
times->utime = cputime_add(times->utime, t->utime); times->utime += t->utime;
times->stime = cputime_add(times->stime, t->stime); times->stime += t->stime;
times->sum_exec_runtime += task_sched_runtime(t); times->sum_exec_runtime += task_sched_runtime(t);
} while_each_thread(tsk, t); } while_each_thread(tsk, t);
out: out:
@@ -258,10 +243,10 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{ {
if (cputime_gt(b->utime, a->utime)) if (b->utime > a->utime)
a->utime = b->utime; a->utime = b->utime;
if (cputime_gt(b->stime, a->stime)) if (b->stime > a->stime)
a->stime = b->stime; a->stime = b->stime;
if (b->sum_exec_runtime > a->sum_exec_runtime) if (b->sum_exec_runtime > a->sum_exec_runtime)
@@ -306,7 +291,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
return -EINVAL; return -EINVAL;
case CPUCLOCK_PROF: case CPUCLOCK_PROF:
thread_group_cputime(p, &cputime); thread_group_cputime(p, &cputime);
cpu->cpu = cputime_add(cputime.utime, cputime.stime); cpu->cpu = cputime.utime + cputime.stime;
break; break;
case CPUCLOCK_VIRT: case CPUCLOCK_VIRT:
thread_group_cputime(p, &cputime); thread_group_cputime(p, &cputime);
@@ -470,26 +455,24 @@ static void cleanup_timers(struct list_head *head,
unsigned long long sum_exec_runtime) unsigned long long sum_exec_runtime)
{ {
struct cpu_timer_list *timer, *next; struct cpu_timer_list *timer, *next;
cputime_t ptime = cputime_add(utime, stime); cputime_t ptime = utime + stime;
list_for_each_entry_safe(timer, next, head, entry) { list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry); list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) { if (timer->expires.cpu < ptime) {
timer->expires.cpu = cputime_zero; timer->expires.cpu = 0;
} else { } else {
timer->expires.cpu = cputime_sub(timer->expires.cpu, timer->expires.cpu -= ptime;
ptime);
} }
} }
++head; ++head;
list_for_each_entry_safe(timer, next, head, entry) { list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry); list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) { if (timer->expires.cpu < utime) {
timer->expires.cpu = cputime_zero; timer->expires.cpu = 0;
} else { } else {
timer->expires.cpu = cputime_sub(timer->expires.cpu, timer->expires.cpu -= utime;
utime);
} }
} }
@@ -520,8 +503,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
struct signal_struct *const sig = tsk->signal; struct signal_struct *const sig = tsk->signal;
cleanup_timers(tsk->signal->cpu_timers, cleanup_timers(tsk->signal->cpu_timers,
cputime_add(tsk->utime, sig->utime), tsk->utime + sig->utime, tsk->stime + sig->stime,
cputime_add(tsk->stime, sig->stime),
tsk->se.sum_exec_runtime + sig->sum_sched_runtime); tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
} }
@@ -540,8 +522,7 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
static inline int expires_gt(cputime_t expires, cputime_t new_exp) static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{ {
return cputime_eq(expires, cputime_zero) || return expires == 0 || expires > new_exp;
cputime_gt(expires, new_exp);
} }
/* /*
@@ -651,7 +632,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
default: default:
return -EINVAL; return -EINVAL;
case CPUCLOCK_PROF: case CPUCLOCK_PROF:
cpu->cpu = cputime_add(cputime.utime, cputime.stime); cpu->cpu = cputime.utime + cputime.stime;
break; break;
case CPUCLOCK_VIRT: case CPUCLOCK_VIRT:
cpu->cpu = cputime.utime; cpu->cpu = cputime.utime;
@@ -918,12 +899,12 @@ static void check_thread_timers(struct task_struct *tsk,
unsigned long soft; unsigned long soft;
maxfire = 20; maxfire = 20;
tsk->cputime_expires.prof_exp = cputime_zero; tsk->cputime_expires.prof_exp = 0;
while (!list_empty(timers)) { while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list, struct cpu_timer_list,
entry); entry);
if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
tsk->cputime_expires.prof_exp = t->expires.cpu; tsk->cputime_expires.prof_exp = t->expires.cpu;
break; break;
} }
@@ -933,12 +914,12 @@ static void check_thread_timers(struct task_struct *tsk,
++timers; ++timers;
maxfire = 20; maxfire = 20;
tsk->cputime_expires.virt_exp = cputime_zero; tsk->cputime_expires.virt_exp = 0;
while (!list_empty(timers)) { while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list, struct cpu_timer_list,
entry); entry);
if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
tsk->cputime_expires.virt_exp = t->expires.cpu; tsk->cputime_expires.virt_exp = t->expires.cpu;
break; break;
} }
@@ -1009,20 +990,19 @@ static u32 onecputick;
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
cputime_t *expires, cputime_t cur_time, int signo) cputime_t *expires, cputime_t cur_time, int signo)
{ {
if (cputime_eq(it->expires, cputime_zero)) if (!it->expires)
return; return;
if (cputime_ge(cur_time, it->expires)) { if (cur_time >= it->expires) {
if (!cputime_eq(it->incr, cputime_zero)) { if (it->incr) {
it->expires = cputime_add(it->expires, it->incr); it->expires += it->incr;
it->error += it->incr_error; it->error += it->incr_error;
if (it->error >= onecputick) { if (it->error >= onecputick) {
it->expires = cputime_sub(it->expires, it->expires -= cputime_one_jiffy;
cputime_one_jiffy);
it->error -= onecputick; it->error -= onecputick;
} }
} else { } else {
it->expires = cputime_zero; it->expires = 0;
} }
trace_itimer_expire(signo == SIGPROF ? trace_itimer_expire(signo == SIGPROF ?
@@ -1031,9 +1011,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
__group_send_sig_info(signo, SEND_SIG_PRIV, tsk); __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
} }
if (!cputime_eq(it->expires, cputime_zero) && if (it->expires && (!*expires || it->expires < *expires)) {
(cputime_eq(*expires, cputime_zero) ||
cputime_lt(it->expires, *expires))) {
*expires = it->expires; *expires = it->expires;
} }
} }
@@ -1048,9 +1026,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
*/ */
static inline int task_cputime_zero(const struct task_cputime *cputime) static inline int task_cputime_zero(const struct task_cputime *cputime)
{ {
if (cputime_eq(cputime->utime, cputime_zero) && if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
cputime_eq(cputime->stime, cputime_zero) &&
cputime->sum_exec_runtime == 0)
return 1; return 1;
return 0; return 0;
} }
@@ -1076,15 +1052,15 @@ static void check_process_timers(struct task_struct *tsk,
*/ */
thread_group_cputimer(tsk, &cputime); thread_group_cputimer(tsk, &cputime);
utime = cputime.utime; utime = cputime.utime;
ptime = cputime_add(utime, cputime.stime); ptime = utime + cputime.stime;
sum_sched_runtime = cputime.sum_exec_runtime; sum_sched_runtime = cputime.sum_exec_runtime;
maxfire = 20; maxfire = 20;
prof_expires = cputime_zero; prof_expires = 0;
while (!list_empty(timers)) { while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers, struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list, struct cpu_timer_list,
entry); entry);
if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) { if (!--maxfire || ptime < tl->expires.cpu) {
prof_expires = tl->expires.cpu; prof_expires = tl->expires.cpu;
break; break;
} }
@@ -1094,12 +1070,12 @@ static void check_process_timers(struct task_struct *tsk,
++timers; ++timers;
maxfire = 20; maxfire = 20;
virt_expires = cputime_zero; virt_expires = 0;
while (!list_empty(timers)) { while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers, struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list, struct cpu_timer_list,
entry); entry);
if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) { if (!--maxfire || utime < tl->expires.cpu) {
virt_expires = tl->expires.cpu; virt_expires = tl->expires.cpu;
break; break;
} }
...@@ -1154,8 +1130,7 @@ static void check_process_timers(struct task_struct *tsk, ...@@ -1154,8 +1130,7 @@ static void check_process_timers(struct task_struct *tsk,
} }
} }
x = secs_to_cputime(soft); x = secs_to_cputime(soft);
if (cputime_eq(prof_expires, cputime_zero) || if (!prof_expires || x < prof_expires) {
cputime_lt(x, prof_expires)) {
prof_expires = x; prof_expires = x;
} }
} }
...@@ -1249,12 +1224,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) ...@@ -1249,12 +1224,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
static inline int task_cputime_expired(const struct task_cputime *sample, static inline int task_cputime_expired(const struct task_cputime *sample,
const struct task_cputime *expires) const struct task_cputime *expires)
{ {
if (!cputime_eq(expires->utime, cputime_zero) && if (expires->utime && sample->utime >= expires->utime)
cputime_ge(sample->utime, expires->utime))
return 1; return 1;
if (!cputime_eq(expires->stime, cputime_zero) && if (expires->stime && sample->utime + sample->stime >= expires->stime)
cputime_ge(cputime_add(sample->utime, sample->stime),
expires->stime))
return 1; return 1;
if (expires->sum_exec_runtime != 0 && if (expires->sum_exec_runtime != 0 &&
sample->sum_exec_runtime >= expires->sum_exec_runtime) sample->sum_exec_runtime >= expires->sum_exec_runtime)
...@@ -1389,18 +1361,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, ...@@ -1389,18 +1361,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
* it to be relative, *newval argument is relative and we update * it to be relative, *newval argument is relative and we update
* it to be absolute. * it to be absolute.
*/ */
if (!cputime_eq(*oldval, cputime_zero)) { if (*oldval) {
if (cputime_le(*oldval, now.cpu)) { if (*oldval <= now.cpu) {
/* Just about to fire. */ /* Just about to fire. */
*oldval = cputime_one_jiffy; *oldval = cputime_one_jiffy;
} else { } else {
*oldval = cputime_sub(*oldval, now.cpu); *oldval -= now.cpu;
} }
} }
if (cputime_eq(*newval, cputime_zero)) if (!*newval)
return; return;
*newval = cputime_add(*newval, now.cpu); *newval += now.cpu;
} }
/* /*
......
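As an aside for readers tracking the conversion: once cputime_t is a nocast u64, the interval-timer expiry logic in the hunks above is just plain integer compares, adds and subtracts. The following minimal userspace sketch mirrors that flow under simplifying assumptions; cputime_t is reduced to a bare uint64_t, and ONE_JIFFY, ONECPUTICK and check_itimer are invented names for illustration, not the kernel's symbols.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cputime_t;          /* simplified stand-in; no __nocast in userspace */

#define ONE_JIFFY   ((cputime_t)10000000ULL)   /* hypothetical 10 ms tick, in ns */
#define ONECPUTICK  10000000U                  /* hypothetical error threshold */

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	uint32_t error;
	uint32_t incr_error;
};

/* Mirrors the rewritten expiry check: plain operators instead of the old
 * cputime_eq()/cputime_ge()/cputime_add()/cputime_sub() macros. */
static void check_itimer(struct cpu_itimer *it, cputime_t *expires,
			 cputime_t cur_time)
{
	if (!it->expires)
		return;
	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= ONECPUTICK) {
				it->expires -= ONE_JIFFY;
				it->error -= ONECPUTICK;
			}
		} else {
			it->expires = 0;
		}
		/* signal delivery elided in this sketch */
	}
	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

int main(void)
{
	struct cpu_itimer it = { .expires = 100, .incr = 50 };
	cputime_t soonest = 0;

	check_itimer(&it, &soonest, 120);
	printf("next expiry: %llu\n", (unsigned long long)soonest);
	return 0;
}
```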
...@@ -2166,7 +2166,7 @@ static int irqtime_account_hi_update(void) ...@@ -2166,7 +2166,7 @@ static int irqtime_account_hi_update(void)
local_irq_save(flags); local_irq_save(flags);
latest_ns = this_cpu_read(cpu_hardirq_time); latest_ns = this_cpu_read(cpu_hardirq_time);
if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) if (nsecs_to_cputime64(latest_ns) > cpustat->irq)
ret = 1; ret = 1;
local_irq_restore(flags); local_irq_restore(flags);
return ret; return ret;
...@@ -2181,7 +2181,7 @@ static int irqtime_account_si_update(void) ...@@ -2181,7 +2181,7 @@ static int irqtime_account_si_update(void)
local_irq_save(flags); local_irq_save(flags);
latest_ns = this_cpu_read(cpu_softirq_time); latest_ns = this_cpu_read(cpu_softirq_time);
if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) if (nsecs_to_cputime64(latest_ns) > cpustat->softirq)
ret = 1; ret = 1;
local_irq_restore(flags); local_irq_restore(flags);
return ret; return ret;
...@@ -3868,19 +3868,17 @@ void account_user_time(struct task_struct *p, cputime_t cputime, ...@@ -3868,19 +3868,17 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled) cputime_t cputime_scaled)
{ {
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
/* Add user time to process. */ /* Add user time to process. */
p->utime = cputime_add(p->utime, cputime); p->utime += cputime;
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime); account_group_user_time(p, cputime);
/* Add user time to cpustat. */ /* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0) if (TASK_NICE(p) > 0)
cpustat->nice = cputime64_add(cpustat->nice, tmp); cpustat->nice += (__force cputime64_t) cputime;
else else
cpustat->user = cputime64_add(cpustat->user, tmp); cpustat->user += (__force cputime64_t) cputime;
cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
/* Account for user time used */ /* Account for user time used */
...@@ -3896,24 +3894,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime, ...@@ -3896,24 +3894,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
static void account_guest_time(struct task_struct *p, cputime_t cputime, static void account_guest_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled) cputime_t cputime_scaled)
{ {
cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
tmp = cputime_to_cputime64(cputime);
/* Add guest time to process. */ /* Add guest time to process. */
p->utime = cputime_add(p->utime, cputime); p->utime += cputime;
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime); account_group_user_time(p, cputime);
p->gtime = cputime_add(p->gtime, cputime); p->gtime += cputime;
/* Add guest time to cpustat. */ /* Add guest time to cpustat. */
if (TASK_NICE(p) > 0) { if (TASK_NICE(p) > 0) {
cpustat->nice = cputime64_add(cpustat->nice, tmp); cpustat->nice += (__force cputime64_t) cputime;
cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); cpustat->guest_nice += (__force cputime64_t) cputime;
} else { } else {
cpustat->user = cputime64_add(cpustat->user, tmp); cpustat->user += (__force cputime64_t) cputime;
cpustat->guest = cputime64_add(cpustat->guest, tmp); cpustat->guest += (__force cputime64_t) cputime;
} }
} }
...@@ -3928,15 +3923,13 @@ static inline ...@@ -3928,15 +3923,13 @@ static inline
void __account_system_time(struct task_struct *p, cputime_t cputime, void __account_system_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled, cputime64_t *target_cputime64) cputime_t cputime_scaled, cputime64_t *target_cputime64)
{ {
cputime64_t tmp = cputime_to_cputime64(cputime);
/* Add system time to process. */ /* Add system time to process. */
p->stime = cputime_add(p->stime, cputime); p->stime += cputime;
p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); p->stimescaled += cputime_scaled;
account_group_system_time(p, cputime); account_group_system_time(p, cputime);
/* Add system time to cpustat. */ /* Add system time to cpustat. */
*target_cputime64 = cputime64_add(*target_cputime64, tmp); *target_cputime64 += (__force cputime64_t) cputime;
cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
/* Account for system time used */ /* Account for system time used */
...@@ -3978,9 +3971,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset, ...@@ -3978,9 +3971,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
void account_steal_time(cputime_t cputime) void account_steal_time(cputime_t cputime)
{ {
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
cpustat->steal = cputime64_add(cpustat->steal, cputime64); cpustat->steal += (__force cputime64_t) cputime;
} }
/* /*
...@@ -3990,13 +3982,12 @@ void account_steal_time(cputime_t cputime) ...@@ -3990,13 +3982,12 @@ void account_steal_time(cputime_t cputime)
void account_idle_time(cputime_t cputime) void account_idle_time(cputime_t cputime)
{ {
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
struct rq *rq = this_rq(); struct rq *rq = this_rq();
if (atomic_read(&rq->nr_iowait) > 0) if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); cpustat->iowait += (__force cputime64_t) cputime;
else else
cpustat->idle = cputime64_add(cpustat->idle, cputime64); cpustat->idle += (__force cputime64_t) cputime;
} }
static __always_inline bool steal_account_process_tick(void) static __always_inline bool steal_account_process_tick(void)
...@@ -4046,16 +4037,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, ...@@ -4046,16 +4037,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq) struct rq *rq)
{ {
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
if (steal_account_process_tick()) if (steal_account_process_tick())
return; return;
if (irqtime_account_hi_update()) { if (irqtime_account_hi_update()) {
cpustat->irq = cputime64_add(cpustat->irq, tmp); cpustat->irq += (__force cputime64_t) cputime_one_jiffy;
} else if (irqtime_account_si_update()) { } else if (irqtime_account_si_update()) {
cpustat->softirq = cputime64_add(cpustat->softirq, tmp); cpustat->softirq += (__force cputime64_t) cputime_one_jiffy;
} else if (this_cpu_ksoftirqd() == p) { } else if (this_cpu_ksoftirqd() == p) {
/* /*
* ksoftirqd time do not get accounted in cpu_softirq_time. * ksoftirqd time do not get accounted in cpu_softirq_time.
...@@ -4171,7 +4161,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) ...@@ -4171,7 +4161,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{ {
cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); cputime_t rtime, utime = p->utime, total = utime + p->stime;
/* /*
* Use CFS's precise accounting: * Use CFS's precise accounting:
...@@ -4179,11 +4169,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) ...@@ -4179,11 +4169,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
rtime = nsecs_to_cputime(p->se.sum_exec_runtime); rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) { if (total) {
u64 temp = rtime; u64 temp = (__force u64) rtime;
temp *= utime; temp *= (__force u64) utime;
do_div(temp, total); do_div(temp, (__force u32) total);
utime = (cputime_t)temp; utime = (__force cputime_t) temp;
} else } else
utime = rtime; utime = rtime;
...@@ -4191,7 +4181,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) ...@@ -4191,7 +4181,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
* Compare with previous values, to keep monotonicity: * Compare with previous values, to keep monotonicity:
*/ */
p->prev_utime = max(p->prev_utime, utime); p->prev_utime = max(p->prev_utime, utime);
p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
*ut = p->prev_utime; *ut = p->prev_utime;
*st = p->prev_stime; *st = p->prev_stime;
...@@ -4208,21 +4198,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) ...@@ -4208,21 +4198,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
thread_group_cputime(p, &cputime); thread_group_cputime(p, &cputime);
total = cputime_add(cputime.utime, cputime.stime); total = cputime.utime + cputime.stime;
rtime = nsecs_to_cputime(cputime.sum_exec_runtime); rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) { if (total) {
u64 temp = rtime; u64 temp = (__force u64) rtime;
temp *= cputime.utime; temp *= (__force u64) cputime.utime;
do_div(temp, total); do_div(temp, (__force u32) total);
utime = (cputime_t)temp; utime = (__force cputime_t) temp;
} else } else
utime = rtime; utime = rtime;
sig->prev_utime = max(sig->prev_utime, utime); sig->prev_utime = max(sig->prev_utime, utime);
sig->prev_stime = max(sig->prev_stime, sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
cputime_sub(rtime, sig->prev_utime));
*ut = sig->prev_utime; *ut = sig->prev_utime;
*st = sig->prev_stime; *st = sig->prev_stime;
...@@ -9769,7 +9758,8 @@ static void cpuacct_update_stats(struct task_struct *tsk, ...@@ -9769,7 +9758,8 @@ static void cpuacct_update_stats(struct task_struct *tsk,
ca = task_ca(tsk); ca = task_ca(tsk);
do { do {
__percpu_counter_add(&ca->cpustat[idx], val, batch); __percpu_counter_add(&ca->cpustat[idx],
(__force s64) val, batch);
ca = ca->parent; ca = ca->parent;
} while (ca); } while (ca);
rcu_read_unlock(); rcu_read_unlock();
......
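The utime scaling in task_times() and thread_group_times() above shows the companion pattern: cast the nocast values to plain integers (__force in the kernel), do the 64-bit multiply and divide, and cast the result back. Below is a minimal userspace sketch of that step; do_div_sketch is an invented stand-in for the kernel's do_div(), and the numbers in main() are arbitrary.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cputime_t;   /* stand-in for the kernel's __nocast type */

/* Userspace stand-in for do_div(): divide a u64 by a u32 in place,
 * return the remainder (ignored here, as in the hunks above). */
static inline uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

/* Scale the precise runtime (rtime) by the utime/total ratio, casting each
 * nocast operand through a plain integer before the arithmetic. */
static cputime_t scale_utime(cputime_t rtime, cputime_t utime, cputime_t total)
{
	uint64_t temp;

	if (!total)
		return rtime;

	temp = (uint64_t)rtime;
	temp *= (uint64_t)utime;
	do_div_sketch(&temp, (uint32_t)total);
	return (cputime_t)temp;
}

int main(void)
{
	/* 60% of the sampled ticks were user time; apply that ratio to rtime. */
	cputime_t ut = scale_utime(/* rtime */ 1000, /* utime */ 60, /* total */ 100);

	printf("scaled utime: %llu\n", (unsigned long long)ut);
	return 0;
}
```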
...@@ -283,8 +283,7 @@ static inline void account_group_user_time(struct task_struct *tsk, ...@@ -283,8 +283,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
return; return;
raw_spin_lock(&cputimer->lock); raw_spin_lock(&cputimer->lock);
cputimer->cputime.utime = cputimer->cputime.utime += cputime;
cputime_add(cputimer->cputime.utime, cputime);
raw_spin_unlock(&cputimer->lock); raw_spin_unlock(&cputimer->lock);
} }
...@@ -307,8 +306,7 @@ static inline void account_group_system_time(struct task_struct *tsk, ...@@ -307,8 +306,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
return; return;
raw_spin_lock(&cputimer->lock); raw_spin_lock(&cputimer->lock);
cputimer->cputime.stime = cputimer->cputime.stime += cputime;
cputime_add(cputimer->cputime.stime, cputime);
raw_spin_unlock(&cputimer->lock); raw_spin_unlock(&cputimer->lock);
} }
......
...@@ -1629,10 +1629,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig) ...@@ -1629,10 +1629,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
info.si_uid = __task_cred(tsk)->uid; info.si_uid = __task_cred(tsk)->uid;
rcu_read_unlock(); rcu_read_unlock();
info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
tsk->signal->utime)); info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
tsk->signal->stime));
info.si_status = tsk->exit_code & 0x7f; info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80) if (tsk->exit_code & 0x80)
......
...@@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) ...@@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
unsigned long maxrss = 0; unsigned long maxrss = 0;
memset((char *) r, 0, sizeof *r); memset((char *) r, 0, sizeof *r);
utime = stime = cputime_zero; utime = stime = 0;
if (who == RUSAGE_THREAD) { if (who == RUSAGE_THREAD) {
task_times(current, &utime, &stime); task_times(current, &utime, &stime);
...@@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) ...@@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
case RUSAGE_SELF: case RUSAGE_SELF:
thread_group_times(p, &tgutime, &tgstime); thread_group_times(p, &tgutime, &tgstime);
utime = cputime_add(utime, tgutime); utime += tgutime;
stime = cputime_add(stime, tgstime); stime += tgstime;
r->ru_nvcsw += p->signal->nvcsw; r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw; r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt; r->ru_minflt += p->signal->min_flt;
......
...@@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk) ...@@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk)
local_irq_save(flags); local_irq_save(flags);
time = tsk->stime + tsk->utime; time = tsk->stime + tsk->utime;
dtime = cputime_sub(time, tsk->acct_timexpd); dtime = time - tsk->acct_timexpd;
jiffies_to_timeval(cputime_to_jiffies(dtime), &value); jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
delta = value.tv_sec; delta = value.tv_sec;
delta = delta * USEC_PER_SEC + value.tv_usec; delta = delta * USEC_PER_SEC + value.tv_usec;
......
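The final hunk reduces the accounting delta to a plain subtraction on the nocast type. The sketch below mirrors that subtraction only; the direct nanosecond-to-microsecond conversion is a simplification of the jiffies/timeval round-trip used in the kernel, and both it and the cputime_delta_usecs name are assumptions of this sketch.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cputime_t;     /* stand-in for the kernel's __nocast type */

#define USEC_PER_SEC_SKETCH 1000000ULL

/* Elapsed cputime since the last accounting point is now a simple
 * subtraction; the ns -> us division assumes a nanosecond-based
 * cputime_t, which is an assumption of this sketch. */
static uint64_t cputime_delta_usecs(cputime_t now, cputime_t last_expd)
{
	cputime_t dtime = now - last_expd;

	return (uint64_t)dtime / 1000;
}

int main(void)
{
	uint64_t delta = cputime_delta_usecs(5000000, 2000000);

	printf("delta: %llu us (%.3f s)\n",
	       (unsigned long long)delta,
	       (double)delta / USEC_PER_SEC_SKETCH);
	return 0;
}
```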