Commit 61420f59 authored by Linus Torvalds

Merge branch 'cputime' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'cputime' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [PATCH] fast vdso implementation for CLOCK_THREAD_CPUTIME_ID
  [PATCH] improve idle cputime accounting
  [PATCH] improve precision of idle time detection.
  [PATCH] improve precision of process accounting.
  [PATCH] idle cputime accounting
  [PATCH] fix scaled & unscaled cputime accounting
parents d97106ab c742b31c
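
The unifying change across these six patches is the reworked accounting API in include/linux/kernel_stat.h (shown near the end of this diff): the separate *_scaled calls are folded into the main functions as an extra argument, steal and idle time get their own entry points, and account_process_tick() moves from kernel/timer.c into kernel/sched.c. For orientation, a summary of the prototypes after the merge:

	/* Consolidated accounting API after this merge, as declared in
	 * include/linux/kernel_stat.h below. */
	extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
	extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
	extern void account_steal_time(cputime_t);	/* cpustat->steal only */
	extern void account_idle_time(cputime_t);	/* cpustat->idle or ->iowait */
	extern void account_process_tick(struct task_struct *, int user);
	extern void account_steal_ticks(unsigned long ticks);
	extern void account_idle_ticks(unsigned long ticks);
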
...@@ -93,13 +93,14 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next) ...@@ -93,13 +93,14 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
now = ia64_get_itc(); now = ia64_get_itc();
delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp)); delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
account_system_time(prev, 0, delta_stime); if (idle_task(smp_processor_id()) != prev)
account_system_time_scaled(prev, delta_stime); account_system_time(prev, 0, delta_stime, delta_stime);
else
account_idle_time(delta_stime);
if (pi->ac_utime) { if (pi->ac_utime) {
delta_utime = cycle_to_cputime(pi->ac_utime); delta_utime = cycle_to_cputime(pi->ac_utime);
account_user_time(prev, delta_utime); account_user_time(prev, delta_utime, delta_utime);
account_user_time_scaled(prev, delta_utime);
} }
pi->ac_stamp = ni->ac_stamp = now; pi->ac_stamp = ni->ac_stamp = now;
...@@ -122,8 +123,10 @@ void account_system_vtime(struct task_struct *tsk) ...@@ -122,8 +123,10 @@ void account_system_vtime(struct task_struct *tsk)
now = ia64_get_itc(); now = ia64_get_itc();
delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
account_system_time(tsk, 0, delta_stime); if (irq_count() || idle_task(smp_processor_id()) != tsk)
account_system_time_scaled(tsk, delta_stime); account_system_time(tsk, 0, delta_stime, delta_stime);
else
account_idle_time(delta_stime);
ti->ac_stime = 0; ti->ac_stime = 0;
ti->ac_stamp = now; ti->ac_stamp = now;
...@@ -143,8 +146,7 @@ void account_process_tick(struct task_struct *p, int user_tick) ...@@ -143,8 +146,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
if (ti->ac_utime) { if (ti->ac_utime) {
delta_utime = cycle_to_cputime(ti->ac_utime); delta_utime = cycle_to_cputime(ti->ac_utime);
account_user_time(p, delta_utime); account_user_time(p, delta_utime, delta_utime);
account_user_time_scaled(p, delta_utime);
ti->ac_utime = 0; ti->ac_utime = 0;
} }
} }
......
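
The ia64 hunks above show the pattern that repeats for powerpc and s390 below: because account_system_time() no longer special-cases the idle task (see the kernel/sched.c hunk near the end), each VIRT_CPU_ACCOUNTING caller now dispatches between system and idle accounting itself. A sketch of the common shape, with the interrupt-context check varying per architecture (irq_count() on ia64, in_irq() on powerpc):

	/* Common dispatch used by the VIRT_CPU_ACCOUNTING paths in this
	 * merge; time burnt by the idle task outside interrupt context is
	 * now idle time, not system time. */
	static void vtime_account_sketch(struct task_struct *tsk,
					 cputime_t delta, cputime_t delta_scaled)
	{
		if (irq_count() || idle_task(smp_processor_id()) != tsk)
			account_system_time(tsk, 0, delta, delta_scaled);
		else
			account_idle_time(delta);
	}
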
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/mqueue.h> #include <linux/mqueue.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/kernel_stat.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
......
...@@ -256,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk) ...@@ -256,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk)
delta += sys_time; delta += sys_time;
get_paca()->system_time = 0; get_paca()->system_time = 0;
} }
account_system_time(tsk, 0, delta); if (in_irq() || idle_task(smp_processor_id()) != tsk)
account_system_time_scaled(tsk, deltascaled); account_system_time(tsk, 0, delta, deltascaled);
else
account_idle_time(delta);
per_cpu(cputime_last_delta, smp_processor_id()) = delta; per_cpu(cputime_last_delta, smp_processor_id()) = delta;
per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
local_irq_restore(flags); local_irq_restore(flags);
...@@ -275,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick) ...@@ -275,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
utime = get_paca()->user_time; utime = get_paca()->user_time;
get_paca()->user_time = 0; get_paca()->user_time = 0;
account_user_time(tsk, utime);
utimescaled = cputime_to_scaled(utime); utimescaled = cputime_to_scaled(utime);
account_user_time_scaled(tsk, utimescaled); account_user_time(tsk, utime, utimescaled);
} }
/* /*
...@@ -338,8 +338,12 @@ void calculate_steal_time(void) ...@@ -338,8 +338,12 @@ void calculate_steal_time(void)
tb = mftb(); tb = mftb();
purr = mfspr(SPRN_PURR); purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr); stolen = (tb - pme->tb) - (purr - pme->purr);
if (stolen > 0) if (stolen > 0) {
account_steal_time(current, stolen); if (idle_task(smp_processor_id()) != current)
account_steal_time(stolen);
else
account_idle_time(stolen);
}
pme->tb = tb; pme->tb = tb;
pme->purr = purr; pme->purr = purr;
} }
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
struct s390_idle_data { struct s390_idle_data {
spinlock_t lock; spinlock_t lock;
unsigned int in_idle;
unsigned long long idle_count; unsigned long long idle_count;
unsigned long long idle_enter; unsigned long long idle_enter;
unsigned long long idle_time; unsigned long long idle_time;
...@@ -22,12 +21,12 @@ struct s390_idle_data { ...@@ -22,12 +21,12 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle); DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
void s390_idle_leave(void); void vtime_start_cpu(void);
static inline void s390_idle_check(void) static inline void s390_idle_check(void)
{ {
if ((&__get_cpu_var(s390_idle))->in_idle) if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
s390_idle_leave(); vtime_start_cpu();
} }
#endif /* _ASM_S390_CPU_H_ */ #endif /* _ASM_S390_CPU_H_ */
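
With the in_idle flag gone, a non-zero idle_enter timestamp is what marks a CPU as idle, and vtime_start_cpu() both terminates and accounts the sleep. This is also why the do_extint()/do_IRQ() hunks further down move s390_idle_check() in front of irq_enter(): the virtual CPU timer has to be restarted before any interrupt time is accounted. A sketch of the resulting entry ordering:

	/* Sketch of s390 interrupt entry after this change; not a literal
	 * handler. If the CPU was sleeping, leave the idle state first so
	 * the interrupt's own time is not charged to the idle period. */
	static void interrupt_entry_sketch(void)
	{
		s390_idle_check();	/* idle_enter != 0 -> vtime_start_cpu() */
		irq_enter();		/* generic hardirq accounting from here on */
		/* ... dispatch the interrupt ... */
	}
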
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <asm/div64.h> #include <asm/div64.h>
/* We want to use micro-second resolution. */ /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
typedef unsigned long long cputime_t; typedef unsigned long long cputime_t;
typedef unsigned long long cputime64_t; typedef unsigned long long cputime64_t;
...@@ -53,9 +53,9 @@ __div(unsigned long long n, unsigned int base) ...@@ -53,9 +53,9 @@ __div(unsigned long long n, unsigned int base)
#define cputime_ge(__a, __b) ((__a) >= (__b)) #define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b)) #define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b)) #define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime_to_jiffies(__ct) (__div((__ct), 1000000 / HZ)) #define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
#define cputime_to_scaled(__ct) (__ct) #define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (1000000 / HZ)) #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
#define cputime64_zero (0ULL) #define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b)) #define cputime64_add(__a, __b) ((__a) + (__b))
...@@ -64,7 +64,7 @@ __div(unsigned long long n, unsigned int base) ...@@ -64,7 +64,7 @@ __div(unsigned long long n, unsigned int base)
static inline u64 static inline u64
cputime64_to_jiffies64(cputime64_t cputime) cputime64_to_jiffies64(cputime64_t cputime)
{ {
do_div(cputime, 1000000 / HZ); do_div(cputime, 4096000000ULL / HZ);
return cputime; return cputime;
} }
...@@ -74,13 +74,13 @@ cputime64_to_jiffies64(cputime64_t cputime) ...@@ -74,13 +74,13 @@ cputime64_to_jiffies64(cputime64_t cputime)
static inline unsigned int static inline unsigned int
cputime_to_msecs(const cputime_t cputime) cputime_to_msecs(const cputime_t cputime)
{ {
return __div(cputime, 1000); return __div(cputime, 4096000);
} }
static inline cputime_t static inline cputime_t
msecs_to_cputime(const unsigned int m) msecs_to_cputime(const unsigned int m)
{ {
return (cputime_t) m * 1000; return (cputime_t) m * 4096000;
} }
/* /*
...@@ -89,13 +89,13 @@ msecs_to_cputime(const unsigned int m) ...@@ -89,13 +89,13 @@ msecs_to_cputime(const unsigned int m)
static inline unsigned int static inline unsigned int
cputime_to_secs(const cputime_t cputime) cputime_to_secs(const cputime_t cputime)
{ {
return __div(cputime, 1000000); return __div(cputime, 2048000000) >> 1;
} }
static inline cputime_t static inline cputime_t
secs_to_cputime(const unsigned int s) secs_to_cputime(const unsigned int s)
{ {
return (cputime_t) s * 1000000; return (cputime_t) s * 4096000000ULL;
} }
/* /*
...@@ -104,7 +104,7 @@ secs_to_cputime(const unsigned int s) ...@@ -104,7 +104,7 @@ secs_to_cputime(const unsigned int s)
static inline cputime_t static inline cputime_t
timespec_to_cputime(const struct timespec *value) timespec_to_cputime(const struct timespec *value)
{ {
return value->tv_nsec / 1000 + (u64) value->tv_sec * 1000000; return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
} }
static inline void static inline void
...@@ -114,12 +114,12 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value) ...@@ -114,12 +114,12 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
register_pair rp; register_pair rp;
rp.pair = cputime >> 1; rp.pair = cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1)); asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_nsec = rp.subreg.even * 1000; value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd; value->tv_sec = rp.subreg.odd;
#else #else
value->tv_nsec = (cputime % 1000000) * 1000; value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
value->tv_sec = cputime / 1000000; value->tv_sec = cputime / 4096000000ULL;
#endif #endif
} }
...@@ -131,7 +131,7 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value) ...@@ -131,7 +131,7 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
static inline cputime_t static inline cputime_t
timeval_to_cputime(const struct timeval *value) timeval_to_cputime(const struct timeval *value)
{ {
return value->tv_usec + (u64) value->tv_sec * 1000000; return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
} }
static inline void static inline void
...@@ -141,12 +141,12 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value) ...@@ -141,12 +141,12 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
register_pair rp; register_pair rp;
rp.pair = cputime >> 1; rp.pair = cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1)); asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_usec = rp.subreg.even; value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd; value->tv_sec = rp.subreg.odd;
#else #else
value->tv_usec = cputime % 1000000; value->tv_usec = cputime % 4096000000ULL;
value->tv_sec = cputime / 1000000; value->tv_sec = cputime / 4096000000ULL;
#endif #endif
} }
...@@ -156,13 +156,13 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value) ...@@ -156,13 +156,13 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
static inline clock_t static inline clock_t
cputime_to_clock_t(cputime_t cputime) cputime_to_clock_t(cputime_t cputime)
{ {
return __div(cputime, 1000000 / USER_HZ); return __div(cputime, 4096000000ULL / USER_HZ);
} }
static inline cputime_t static inline cputime_t
clock_t_to_cputime(unsigned long x) clock_t_to_cputime(unsigned long x)
{ {
return (cputime_t) x * (1000000 / USER_HZ); return (cputime_t) x * (4096000000ULL / USER_HZ);
} }
/* /*
...@@ -171,7 +171,7 @@ clock_t_to_cputime(unsigned long x) ...@@ -171,7 +171,7 @@ clock_t_to_cputime(unsigned long x)
static inline clock_t static inline clock_t
cputime64_to_clock_t(cputime64_t cputime) cputime64_to_clock_t(cputime64_t cputime)
{ {
return __div(cputime, 1000000 / USER_HZ); return __div(cputime, 4096000000ULL / USER_HZ);
} }
#endif /* _S390_CPUTIME_H */ #endif /* _S390_CPUTIME_H */
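
All the new constants above follow from one change of unit: a cputime_t tick is now 2**-12 microseconds, the native resolution of the s390 CPU timer, so a microsecond is 4096 units and a second is 4096000000. A standalone illustration of the arithmetic, including the halved divisor in cputime_to_secs() (the 31-bit dr instruction cannot take a divisor of 2**31 or more, which 4096000000 is):

	#include <stdio.h>

	typedef unsigned long long cputime_t;

	#define CPUTIME_PER_USEC 4096ULL
	#define CPUTIME_PER_SEC  (CPUTIME_PER_USEC * 1000000)	/* 4096000000 */

	int main(void)
	{
		cputime_t ten_sec = 10 * CPUTIME_PER_SEC;
		printf("1 ms = %llu units\n", CPUTIME_PER_USEC * 1000); /* 4096000 */
		/* cputime_to_secs(): divide by half the base, then halve
		 * the quotient - same result, divisor fits in 31 bits. */
		printf("%llu units = %llu s\n", ten_sec,
		       ten_sec / 2048000000ULL >> 1);			/* 10 */
		return 0;
	}
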
...@@ -67,11 +67,11 @@ ...@@ -67,11 +67,11 @@
#define __LC_SYNC_ENTER_TIMER 0x248 #define __LC_SYNC_ENTER_TIMER 0x248
#define __LC_ASYNC_ENTER_TIMER 0x250 #define __LC_ASYNC_ENTER_TIMER 0x250
#define __LC_EXIT_TIMER 0x258 #define __LC_EXIT_TIMER 0x258
#define __LC_LAST_UPDATE_TIMER 0x260 #define __LC_USER_TIMER 0x260
#define __LC_USER_TIMER 0x268 #define __LC_SYSTEM_TIMER 0x268
#define __LC_SYSTEM_TIMER 0x270 #define __LC_STEAL_TIMER 0x270
#define __LC_LAST_UPDATE_CLOCK 0x278 #define __LC_LAST_UPDATE_TIMER 0x278
#define __LC_STEAL_CLOCK 0x280 #define __LC_LAST_UPDATE_CLOCK 0x280
#define __LC_RETURN_MCCK_PSW 0x288 #define __LC_RETURN_MCCK_PSW 0x288
#define __LC_KERNEL_STACK 0xC40 #define __LC_KERNEL_STACK 0xC40
#define __LC_THREAD_INFO 0xC44 #define __LC_THREAD_INFO 0xC44
...@@ -89,11 +89,11 @@ ...@@ -89,11 +89,11 @@
#define __LC_SYNC_ENTER_TIMER 0x250 #define __LC_SYNC_ENTER_TIMER 0x250
#define __LC_ASYNC_ENTER_TIMER 0x258 #define __LC_ASYNC_ENTER_TIMER 0x258
#define __LC_EXIT_TIMER 0x260 #define __LC_EXIT_TIMER 0x260
#define __LC_LAST_UPDATE_TIMER 0x268 #define __LC_USER_TIMER 0x268
#define __LC_USER_TIMER 0x270 #define __LC_SYSTEM_TIMER 0x270
#define __LC_SYSTEM_TIMER 0x278 #define __LC_STEAL_TIMER 0x278
#define __LC_LAST_UPDATE_CLOCK 0x280 #define __LC_LAST_UPDATE_TIMER 0x280
#define __LC_STEAL_CLOCK 0x288 #define __LC_LAST_UPDATE_CLOCK 0x288
#define __LC_RETURN_MCCK_PSW 0x290 #define __LC_RETURN_MCCK_PSW 0x290
#define __LC_KERNEL_STACK 0xD40 #define __LC_KERNEL_STACK 0xD40
#define __LC_THREAD_INFO 0xD48 #define __LC_THREAD_INFO 0xD48
...@@ -106,8 +106,10 @@ ...@@ -106,8 +106,10 @@
#define __LC_IPLDEV 0xDB8 #define __LC_IPLDEV 0xDB8
#define __LC_CURRENT 0xDD8 #define __LC_CURRENT 0xDD8
#define __LC_INT_CLOCK 0xDE8 #define __LC_INT_CLOCK 0xDE8
#define __LC_VDSO_PER_CPU 0xE38
#endif /* __s390x__ */ #endif /* __s390x__ */
#define __LC_PASTE 0xE40
#define __LC_PANIC_MAGIC 0xE00 #define __LC_PANIC_MAGIC 0xE00
#ifndef __s390x__ #ifndef __s390x__
...@@ -252,11 +254,11 @@ struct _lowcore ...@@ -252,11 +254,11 @@ struct _lowcore
__u64 sync_enter_timer; /* 0x248 */ __u64 sync_enter_timer; /* 0x248 */
__u64 async_enter_timer; /* 0x250 */ __u64 async_enter_timer; /* 0x250 */
__u64 exit_timer; /* 0x258 */ __u64 exit_timer; /* 0x258 */
__u64 last_update_timer; /* 0x260 */ __u64 user_timer; /* 0x260 */
__u64 user_timer; /* 0x268 */ __u64 system_timer; /* 0x268 */
__u64 system_timer; /* 0x270 */ __u64 steal_timer; /* 0x270 */
__u64 last_update_clock; /* 0x278 */ __u64 last_update_timer; /* 0x278 */
__u64 steal_clock; /* 0x280 */ __u64 last_update_clock; /* 0x280 */
psw_t return_mcck_psw; /* 0x288 */ psw_t return_mcck_psw; /* 0x288 */
__u8 pad8[0xc00-0x290]; /* 0x290 */ __u8 pad8[0xc00-0x290]; /* 0x290 */
...@@ -343,11 +345,11 @@ struct _lowcore ...@@ -343,11 +345,11 @@ struct _lowcore
__u64 sync_enter_timer; /* 0x250 */ __u64 sync_enter_timer; /* 0x250 */
__u64 async_enter_timer; /* 0x258 */ __u64 async_enter_timer; /* 0x258 */
__u64 exit_timer; /* 0x260 */ __u64 exit_timer; /* 0x260 */
__u64 last_update_timer; /* 0x268 */ __u64 user_timer; /* 0x268 */
__u64 user_timer; /* 0x270 */ __u64 system_timer; /* 0x270 */
__u64 system_timer; /* 0x278 */ __u64 steal_timer; /* 0x278 */
__u64 last_update_clock; /* 0x280 */ __u64 last_update_timer; /* 0x280 */
__u64 steal_clock; /* 0x288 */ __u64 last_update_clock; /* 0x288 */
psw_t return_mcck_psw; /* 0x290 */ psw_t return_mcck_psw; /* 0x290 */
__u8 pad8[0xc00-0x2a0]; /* 0x2a0 */ __u8 pad8[0xc00-0x2a0]; /* 0x2a0 */
/* System info area */ /* System info area */
...@@ -381,7 +383,12 @@ struct _lowcore ...@@ -381,7 +383,12 @@ struct _lowcore
/* whether the kernel died with panic() or not */ /* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */ __u32 panic_magic; /* 0xe00 */
__u8 pad13[0x11b8-0xe04]; /* 0xe04 */ /* Per cpu primary space access list */
__u8 pad_0xe04[0xe3c-0xe04]; /* 0xe04 */
__u32 vdso_per_cpu_data; /* 0xe3c */
__u32 paste[16]; /* 0xe40 */
__u8 pad13[0x11b8-0xe80]; /* 0xe80 */
/* 64 bit extparam used for pfault, diag 250 etc */ /* 64 bit extparam used for pfault, diag 250 etc */
__u64 ext_params2; /* 0x11B8 */ __u64 ext_params2; /* 0x11B8 */
......
...@@ -99,7 +99,7 @@ static inline void restore_access_regs(unsigned int *acrs) ...@@ -99,7 +99,7 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \ prev = __switch_to(prev,next); \
} while (0) } while (0)
extern void account_vtime(struct task_struct *); extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *); extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *);
...@@ -121,7 +121,7 @@ static inline void cmma_init(void) { } ...@@ -121,7 +121,7 @@ static inline void cmma_init(void) { }
#define finish_arch_switch(prev) do { \ #define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \ set_fs(current->thread.mm_segment); \
account_vtime(prev); \ account_vtime(prev, current); \
} while (0) } while (0)
#define nop() asm volatile("nop") #define nop() asm volatile("nop")
......
...@@ -47,6 +47,8 @@ struct thread_info { ...@@ -47,6 +47,8 @@ struct thread_info {
unsigned int cpu; /* current CPU */ unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block; struct restart_block restart_block;
__u64 user_timer;
__u64 system_timer;
}; };
/* /*
......
...@@ -23,20 +23,18 @@ struct vtimer_list { ...@@ -23,20 +23,18 @@ struct vtimer_list {
__u64 expires; __u64 expires;
__u64 interval; __u64 interval;
spinlock_t lock;
unsigned long magic;
void (*function)(unsigned long); void (*function)(unsigned long);
unsigned long data; unsigned long data;
}; };
/* the offset value will wrap after ca. 71 years */ /* the vtimer value will wrap after ca. 71 years */
struct vtimer_queue { struct vtimer_queue {
struct list_head list; struct list_head list;
spinlock_t lock; spinlock_t lock;
__u64 to_expire; /* current event expire time */ __u64 timer; /* last programmed timer */
__u64 offset; /* list offset to zero */ __u64 elapsed; /* elapsed time of timer expire values */
__u64 idle; /* temp var for idle */ __u64 idle; /* temp var for idle */
int do_spt; /* =1: reprogram cpu timer in idle */
}; };
extern void init_virt_timer(struct vtimer_list *timer); extern void init_virt_timer(struct vtimer_list *timer);
...@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer); ...@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void); extern void init_cpu_vtimer(void);
extern void vtime_init(void); extern void vtime_init(void);
extern void vtime_start_cpu_timer(void); extern void vtime_stop_cpu(void);
extern void vtime_stop_cpu_timer(void); extern void vtime_start_leave(void);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -12,9 +12,9 @@ ...@@ -12,9 +12,9 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* /*
* Note about this structure: * Note about the vdso_data and vdso_per_cpu_data structures:
* *
* NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
* structure is supposed to be known only to the function in the vdso * structure is supposed to be known only to the function in the vdso
* itself and may change without notice. * itself and may change without notice.
*/ */
...@@ -28,10 +28,21 @@ struct vdso_data { ...@@ -28,10 +28,21 @@ struct vdso_data {
__u64 wtom_clock_nsec; /* 0x28 */ __u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */ __u32 tz_dsttime; /* Type of dst correction 0x34 */
__u32 ectg_available;
};
struct vdso_per_cpu_data {
__u64 ectg_timer_base;
__u64 ectg_user_time;
}; };
extern struct vdso_data *vdso_data; extern struct vdso_data *vdso_data;
#ifdef CONFIG_64BIT
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
#endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -48,6 +48,11 @@ int main(void) ...@@ -48,6 +48,11 @@ int main(void)
DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
DEFINE(__VDSO_ECTG_BASE,
offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER,
offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */ /* constants used by the vdso */
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
......
...@@ -583,8 +583,8 @@ kernel_per: ...@@ -583,8 +583,8 @@ kernel_per:
.globl io_int_handler .globl io_int_handler
io_int_handler: io_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16 SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
...@@ -723,8 +723,8 @@ io_notify_resume: ...@@ -723,8 +723,8 @@ io_notify_resume:
.globl ext_int_handler .globl ext_int_handler
ext_int_handler: ext_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16 SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
...@@ -750,6 +750,7 @@ __critical_end: ...@@ -750,6 +750,7 @@ __critical_end:
.globl mcck_int_handler .globl mcck_int_handler
mcck_int_handler: mcck_int_handler:
stck __LC_INT_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_BASE __LC_SAVE_AREA+32
......
...@@ -177,8 +177,11 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ ...@@ -177,8 +177,11 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
.if !\sync .if !\sync
ni \psworg+1,0xfd # clear wait state bit ni \psworg+1,0xfd # clear wait state bit
.endif .endif
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r14,%r15,SP_R14(%r15) # load gprs 14-15 of user
lpswe \psworg # back to caller lpswe \psworg # back to caller
.endm .endm
...@@ -559,8 +562,8 @@ kernel_per: ...@@ -559,8 +562,8 @@ kernel_per:
*/ */
.globl io_int_handler .globl io_int_handler
io_int_handler: io_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
...@@ -721,8 +724,8 @@ io_notify_resume: ...@@ -721,8 +724,8 @@ io_notify_resume:
*/ */
.globl ext_int_handler .globl ext_int_handler
ext_int_handler: ext_int_handler:
stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
...@@ -746,6 +749,7 @@ __critical_end: ...@@ -746,6 +749,7 @@ __critical_end:
*/ */
.globl mcck_int_handler .globl mcck_int_handler
mcck_int_handler: mcck_int_handler:
stck __LC_INT_CLOCK
la %r1,4095 # revalidate r1 la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
...@@ -979,23 +983,23 @@ cleanup_sysc_return: ...@@ -979,23 +983,23 @@ cleanup_sysc_return:
cleanup_sysc_leave: cleanup_sysc_leave:
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
je 2f je 3f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
je 2f jhe 0f
mvc __LC_RETURN_PSW(16),SP_PSW(%r15) mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW cghi %r12,__LC_MCK_OLD_PSW
jne 0f jne 1f
mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
j 1f j 2f
0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) 1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
1: lmg %r0,%r11,SP_R0(%r15) 2: lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15) lg %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW 3: la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_sysc_leave_insn: cleanup_sysc_leave_insn:
.quad sysc_done - 4 .quad sysc_done - 4
.quad sysc_done - 8 .quad sysc_done - 16
cleanup_io_return: cleanup_io_return:
mvc __LC_RETURN_PSW(8),0(%r12) mvc __LC_RETURN_PSW(8),0(%r12)
...@@ -1005,23 +1009,23 @@ cleanup_io_return: ...@@ -1005,23 +1009,23 @@ cleanup_io_return:
cleanup_io_leave: cleanup_io_leave:
clc 8(8,%r12),BASED(cleanup_io_leave_insn) clc 8(8,%r12),BASED(cleanup_io_leave_insn)
je 2f je 3f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
je 2f jhe 0f
mvc __LC_RETURN_PSW(16),SP_PSW(%r15) mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW cghi %r12,__LC_MCK_OLD_PSW
jne 0f jne 1f
mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
j 1f j 2f
0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) 1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
1: lmg %r0,%r11,SP_R0(%r15) 2: lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15) lg %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW 3: la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_io_leave_insn: cleanup_io_leave_insn:
.quad io_done - 4 .quad io_done - 4
.quad io_done - 8 .quad io_done - 16
/* /*
* Integer constants * Integer constants
......
...@@ -87,6 +87,8 @@ startup_continue: ...@@ -87,6 +87,8 @@ startup_continue:
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore # move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
lghi %r0,__LC_PASTE
stg %r0,__LC_VDSO_PER_CPU
# #
# Setup stack # Setup stack
# #
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/tick.h> #include <linux/tick.h>
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/kernel_stat.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/system.h> #include <asm/system.h>
...@@ -45,7 +46,6 @@ ...@@ -45,7 +46,6 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/timer.h> #include <asm/timer.h>
#include <asm/cpu.h>
#include "entry.h" #include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
...@@ -75,36 +75,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk) ...@@ -75,36 +75,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8]; return sf->gprs[8];
} }
DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
};
static int s390_idle_enter(void)
{
struct s390_idle_data *idle;
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_count++;
idle->in_idle = 1;
idle->idle_enter = get_clock();
spin_unlock(&idle->lock);
vtime_stop_cpu_timer();
return NOTIFY_OK;
}
void s390_idle_leave(void)
{
struct s390_idle_data *idle;
vtime_start_cpu_timer();
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_time += get_clock() - idle->idle_enter;
idle->in_idle = 0;
spin_unlock(&idle->lock);
}
extern void s390_handle_mcck(void); extern void s390_handle_mcck(void);
/* /*
* The idle loop on a S390... * The idle loop on a S390...
...@@ -117,10 +87,6 @@ static void default_idle(void) ...@@ -117,10 +87,6 @@ static void default_idle(void)
local_irq_enable(); local_irq_enable();
return; return;
} }
if (s390_idle_enter() == NOTIFY_BAD) {
local_irq_enable();
return;
}
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id())) { if (cpu_is_offline(smp_processor_id())) {
preempt_enable_no_resched(); preempt_enable_no_resched();
...@@ -130,7 +96,6 @@ static void default_idle(void) ...@@ -130,7 +96,6 @@ static void default_idle(void)
local_mcck_disable(); local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) { if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable(); local_mcck_enable();
s390_idle_leave();
local_irq_enable(); local_irq_enable();
s390_handle_mcck(); s390_handle_mcck();
return; return;
...@@ -138,9 +103,9 @@ static void default_idle(void) ...@@ -138,9 +103,9 @@ static void default_idle(void)
trace_hardirqs_on(); trace_hardirqs_on();
/* Don't trace preempt off for idle. */ /* Don't trace preempt off for idle. */
stop_critical_timings(); stop_critical_timings();
/* Wait for external, I/O or machine check interrupt. */ /* Stop virtual timer and halt the cpu. */
__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | vtime_stop_cpu();
PSW_MASK_IO | PSW_MASK_EXT); /* Reenable preemption tracer. */
start_critical_timings(); start_critical_timings();
} }
......
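
Note that the explicit wait-PSW load disappears from default_idle() above; halting now happens inside vtime_stop_cpu(). A hedged sketch of the resulting idle entry, under the assumption that vtime_stop_cpu() takes over the bookkeeping the deleted s390_idle_enter()/s390_idle_leave() pair used to do:

	/* Sketch of the new idle entry; the body of vtime_stop_cpu() lives
	 * in arch/s390/kernel/vtime.c and is not shown in this diff. It is
	 * assumed here to account the elapsed run time, record
	 * s390_idle.idle_enter, and load the enabled-wait PSW itself. */
	static void default_idle_sketch(void)
	{
		local_irq_disable();
		if (need_resched()) {
			local_irq_enable();
			return;
		}
		vtime_stop_cpu();	/* sleeps; an interrupt resumes us */
	}
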
...@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code) ...@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
struct pt_regs *old_regs; struct pt_regs *old_regs;
old_regs = set_irq_regs(regs); old_regs = set_irq_regs(regs);
irq_enter();
s390_idle_check(); s390_idle_check();
irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */ /* Serve timer interrupts first. */
clock_comparator_work(); clock_comparator_work();
......
...@@ -427,6 +427,8 @@ setup_lowcore(void) ...@@ -427,6 +427,8 @@ setup_lowcore(void)
/* enable extended save area */ /* enable extended save area */
__ctl_set_bit(14, 29); __ctl_set_bit(14, 29);
} }
#else
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif #endif
set_prefix((u32)(unsigned long) lc); set_prefix((u32)(unsigned long) lc);
} }
......
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/vdso.h>
#include "entry.h" #include "entry.h"
/* /*
...@@ -500,6 +501,9 @@ static int __cpuinit smp_alloc_lowcore(int cpu) ...@@ -500,6 +501,9 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
goto out; goto out;
lowcore->extended_save_area_addr = (u32) save_area; lowcore->extended_save_area_addr = (u32) save_area;
} }
#else
if (vdso_alloc_per_cpu(cpu, lowcore))
goto out;
#endif #endif
lowcore_ptr[cpu] = lowcore; lowcore_ptr[cpu] = lowcore;
return 0; return 0;
...@@ -522,6 +526,8 @@ static void smp_free_lowcore(int cpu) ...@@ -522,6 +526,8 @@ static void smp_free_lowcore(int cpu)
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) if (MACHINE_HAS_IEEE)
free_page((unsigned long) lowcore->extended_save_area_addr); free_page((unsigned long) lowcore->extended_save_area_addr);
#else
vdso_free_per_cpu(cpu, lowcore);
#endif #endif
free_page(lowcore->panic_stack - PAGE_SIZE); free_page(lowcore->panic_stack - PAGE_SIZE);
free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
...@@ -664,6 +670,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -664,6 +670,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order); lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
panic_stack = __get_free_page(GFP_KERNEL); panic_stack = __get_free_page(GFP_KERNEL);
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) if (MACHINE_HAS_IEEE)
save_area = get_zeroed_page(GFP_KERNEL); save_area = get_zeroed_page(GFP_KERNEL);
...@@ -677,6 +684,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -677,6 +684,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area; lowcore->extended_save_area_addr = (u32) save_area;
#else
BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
#endif #endif
set_prefix((u32)(unsigned long) lowcore); set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable(); local_mcck_enable();
...@@ -845,9 +854,11 @@ static ssize_t show_idle_count(struct sys_device *dev, ...@@ -845,9 +854,11 @@ static ssize_t show_idle_count(struct sys_device *dev,
unsigned long long idle_count; unsigned long long idle_count;
idle = &per_cpu(s390_idle, dev->id); idle = &per_cpu(s390_idle, dev->id);
spin_lock_irq(&idle->lock); spin_lock(&idle->lock);
idle_count = idle->idle_count; idle_count = idle->idle_count;
spin_unlock_irq(&idle->lock); if (idle->idle_enter)
idle_count++;
spin_unlock(&idle->lock);
return sprintf(buf, "%llu\n", idle_count); return sprintf(buf, "%llu\n", idle_count);
} }
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
...@@ -856,18 +867,17 @@ static ssize_t show_idle_time(struct sys_device *dev, ...@@ -856,18 +867,17 @@ static ssize_t show_idle_time(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf) struct sysdev_attribute *attr, char *buf)
{ {
struct s390_idle_data *idle; struct s390_idle_data *idle;
unsigned long long new_time; unsigned long long now, idle_time, idle_enter;
idle = &per_cpu(s390_idle, dev->id); idle = &per_cpu(s390_idle, dev->id);
spin_lock_irq(&idle->lock); spin_lock(&idle->lock);
if (idle->in_idle) { now = get_clock();
new_time = get_clock(); idle_time = idle->idle_time;
idle->idle_time += new_time - idle->idle_enter; idle_enter = idle->idle_enter;
idle->idle_enter = new_time; if (idle_enter != 0ULL && idle_enter < now)
} idle_time += now - idle_enter;
new_time = idle->idle_time; spin_unlock(&idle->lock);
spin_unlock_irq(&idle->lock); return sprintf(buf, "%llu\n", idle_time >> 12);
return sprintf(buf, "%llu\n", new_time >> 12);
} }
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
......
...@@ -31,9 +31,6 @@ ...@@ -31,9 +31,6 @@
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/vdso.h> #include <asm/vdso.h>
/* Max supported size for symbol names */
#define MAX_SYMNAME 64
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end; extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start; static void *vdso32_kbase = &vdso32_start;
...@@ -70,6 +67,119 @@ static union { ...@@ -70,6 +67,119 @@ static union {
} vdso_data_store __attribute__((__section__(".data.page_aligned"))); } vdso_data_store __attribute__((__section__(".data.page_aligned")));
struct vdso_data *vdso_data = &vdso_data_store.data; struct vdso_data *vdso_data = &vdso_data_store.data;
/*
* Setup vdso data page.
*/
static void vdso_init_data(struct vdso_data *vd)
{
unsigned int facility_list;
facility_list = stfl();
vd->ectg_available = switch_amode && (facility_list & 1);
}
#ifdef CONFIG_64BIT
/*
* Setup per cpu vdso data page.
*/
static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
{
}
/*
* Allocate/free per cpu vdso data.
*/
#ifdef CONFIG_64BIT
#define SEGMENT_ORDER 2
#else
#define SEGMENT_ORDER 1
#endif
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
int i;
lowcore->vdso_per_cpu_data = __LC_PASTE;
if (!switch_amode || !vdso_enabled)
return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
page_frame = get_zeroed_page(GFP_KERNEL);
if (!segment_table || !page_table || !page_frame)
goto out;
clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
PAGE_SIZE << SEGMENT_ORDER);
clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
256*sizeof(unsigned long));
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_RO + page_frame;
psal = (u32 *) (page_table + 256*sizeof(unsigned long));
aste = psal + 32;
for (i = 4; i < 32; i += 4)
psal[i] = 0x80000000;
lowcore->paste[4] = (u32)(addr_t) psal;
psal[0] = 0x20000000;
psal[2] = (u32)(addr_t) aste;
*(unsigned long *) (aste + 2) = segment_table +
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
aste[4] = (u32)(addr_t) psal;
lowcore->vdso_per_cpu_data = page_frame;
vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
return 0;
out:
free_page(page_frame);
free_page(page_table);
free_pages(segment_table, SEGMENT_ORDER);
return -ENOMEM;
}
#ifdef CONFIG_HOTPLUG_CPU
void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
if (!switch_amode || !vdso_enabled)
return;
psal = (u32 *)(addr_t) lowcore->paste[4];
aste = (u32 *)(addr_t) psal[2];
segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
page_table = *(unsigned long *) segment_table;
page_frame = *(unsigned long *) page_table;
free_page(page_frame);
free_page(page_table);
free_pages(segment_table, SEGMENT_ORDER);
}
#endif /* CONFIG_HOTPLUG_CPU */
static void __vdso_init_cr5(void *dummy)
{
unsigned long cr5;
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
}
static void vdso_init_cr5(void)
{
if (switch_amode && vdso_enabled)
on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */
/* /*
* This is called from binfmt_elf, we create the special vma for the * This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree * vDSO and insert it into the mm struct tree
...@@ -172,6 +282,9 @@ static int __init vdso_init(void) ...@@ -172,6 +282,9 @@ static int __init vdso_init(void)
{ {
int i; int i;
if (!vdso_enabled)
return 0;
vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
/* Calculate the size of the 32 bit vDSO */ /* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start vdso32_pages = ((&vdso32_end - &vdso32_start
...@@ -208,6 +321,10 @@ static int __init vdso_init(void) ...@@ -208,6 +321,10 @@ static int __init vdso_init(void)
} }
vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
vdso64_pagelist[vdso64_pages] = NULL; vdso64_pagelist[vdso64_pages] = NULL;
#ifndef CONFIG_SMP
BUG_ON(vdso_alloc_per_cpu(0, S390_lowcore));
#endif
vdso_init_cr5();
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
get_page(virt_to_page(vdso_data)); get_page(virt_to_page(vdso_data));
......
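
vdso_alloc_per_cpu() above builds, per CPU, a tiny address space whose single page holds the vdso_per_cpu_data, and wires it into the lowcore paste table; vdso_free_per_cpu() recovers the pieces by walking the same chain backwards. The chain, as read from the code above:

	/* Per-CPU chain set up by vdso_alloc_per_cpu():
	 *
	 *   lowcore->paste[4] -> PSAL entry
	 *   psal[2]           -> ASTE
	 *   aste[2..3]        -> segment table (ASCE type/length bits or'ed in)
	 *   segment table     -> page table -> page frame (vdso_per_cpu_data)
	 *
	 * __vdso_init_cr5() points CR5 at the paste table, so in
	 * access-register mode (the sacf 512 in clock_gettime below) every
	 * CPU reaches its own page at the same address. The free path walks
	 * the chain back, as in vdso_free_per_cpu() above: */
	u32 *psal, *aste;
	unsigned long segment_table;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
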
...@@ -22,7 +22,12 @@ __kernel_clock_getres: ...@@ -22,7 +22,12 @@ __kernel_clock_getres:
cghi %r2,CLOCK_REALTIME cghi %r2,CLOCK_REALTIME
je 0f je 0f
cghi %r2,CLOCK_MONOTONIC cghi %r2,CLOCK_MONOTONIC
je 0f
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
jne 2f jne 2f
larl %r5,_vdso_data
icm %r0,15,__LC_ECTG_OK(%r5)
jz 2f
0: ltgr %r3,%r3 0: ltgr %r3,%r3
jz 1f /* res == NULL */ jz 1f /* res == NULL */
larl %r1,3f larl %r1,3f
......
...@@ -22,8 +22,10 @@ __kernel_clock_gettime: ...@@ -22,8 +22,10 @@ __kernel_clock_gettime:
larl %r5,_vdso_data larl %r5,_vdso_data
cghi %r2,CLOCK_REALTIME cghi %r2,CLOCK_REALTIME
je 4f je 4f
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
je 9f
cghi %r2,CLOCK_MONOTONIC cghi %r2,CLOCK_MONOTONIC
jne 9f jne 12f
/* CLOCK_MONOTONIC */ /* CLOCK_MONOTONIC */
ltgr %r3,%r3 ltgr %r3,%r3
...@@ -42,7 +44,7 @@ __kernel_clock_gettime: ...@@ -42,7 +44,7 @@ __kernel_clock_gettime:
alg %r0,__VDSO_WTOM_SEC(%r5) alg %r0,__VDSO_WTOM_SEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b jne 0b
larl %r5,10f larl %r5,13f
1: clg %r1,0(%r5) 1: clg %r1,0(%r5)
jl 2f jl 2f
slg %r1,0(%r5) slg %r1,0(%r5)
...@@ -68,7 +70,7 @@ __kernel_clock_gettime: ...@@ -68,7 +70,7 @@ __kernel_clock_gettime:
lg %r0,__VDSO_XTIME_SEC(%r5) lg %r0,__VDSO_XTIME_SEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 5b jne 5b
larl %r5,10f larl %r5,13f
6: clg %r1,0(%r5) 6: clg %r1,0(%r5)
jl 7f jl 7f
slg %r1,0(%r5) slg %r1,0(%r5)
...@@ -79,11 +81,38 @@ __kernel_clock_gettime: ...@@ -79,11 +81,38 @@ __kernel_clock_gettime:
8: lghi %r2,0 8: lghi %r2,0
br %r14 br %r14
/* CLOCK_THREAD_CPUTIME_ID for this thread */
9: icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
ear %r2,%a4
llilh %r4,0x0100
sar %a4,%r4
lghi %r4,0
sacf 512 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
sacf 0
sar %a4,%r2
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
lgr %r4,%r1
larl %r5,13f
srlg %r1,%r1,9 /* divide by 1000000000 */
mlg %r0,8(%r5)
srlg %r0,%r0,11 /* r0 = tv_sec */
stg %r0,0(%r3)
msg %r0,0(%r5) /* calculate tv_nsec */
slgr %r4,%r0 /* r4 = tv_nsec */
stg %r4,8(%r3)
lghi %r2,0
br %r14
/* Fallback to system call */ /* Fallback to system call */
9: lghi %r1,__NR_clock_gettime 12: lghi %r1,__NR_clock_gettime
svc 0 svc 0
br %r14 br %r14
10: .quad 1000000000 13: .quad 1000000000
14: .quad 19342813113834067
.cfi_endproc .cfi_endproc
.size __kernel_clock_gettime,.-__kernel_clock_gettime .size __kernel_clock_gettime,.-__kernel_clock_gettime
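
The fast path above never enters the kernel. As used here, ectg leaves its first operand minus the current CPU timer in %r0 and its second operand in %r1; since the CPU timer counts down, the algr then yields the thread's accumulated user time plus the slice consumed since the last kernel exit. Both operands are refreshed by the 16-byte mvc added to the exit macro earlier, which is presumably why the lowcore reshuffle puts user_timer directly after exit_timer, matching the ectg_timer_base/ectg_user_time pair. The mghi/srlg pair converts 2**-12 µs units to nanoseconds, and the constant 19342813113834067 at label 14 is ceil(2**84 / 10**9), used to divide by a billion with a multiply. A standalone check of that trick, assuming a compiler with __int128:

	#include <stdio.h>
	#include <stdint.h>

	/* 19342813113834067 == ceil(2**84 / 10**9), the .quad at label 14 */
	#define RECIP_1E9 19342813113834067ULL

	/* Mirrors the srlg 9 / mlg / srlg 11 sequence: (ns/2**9)*C / 2**75 */
	static uint64_t div_1e9(uint64_t ns)
	{
		unsigned __int128 prod = (unsigned __int128)(ns >> 9) * RECIP_1E9;
		return (uint64_t)(prod >> 64) >> 11;
	}

	int main(void)
	{
		uint64_t s[] = { 999999999ULL, 1000000000ULL,
				 123456789123456789ULL };
		for (int i = 0; i < 3; i++)
			printf("%llu -> %llu (expect %llu)\n",
			       (unsigned long long)s[i],
			       (unsigned long long)div_1e9(s[i]),
			       (unsigned long long)(s[i] / 1000000000ULL));
		return 0;
	}
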
...@@ -132,8 +132,7 @@ static void do_stolen_accounting(void) ...@@ -132,8 +132,7 @@ static void do_stolen_accounting(void)
*snap = state; *snap = state;
/* Add the appropriate number of ticks of stolen time, /* Add the appropriate number of ticks of stolen time,
including any left-overs from last time. Passing NULL to including any left-overs from last time. */
account_steal_time accounts the time as stolen. */
stolen = runnable + offline + __get_cpu_var(residual_stolen); stolen = runnable + offline + __get_cpu_var(residual_stolen);
if (stolen < 0) if (stolen < 0)
...@@ -141,11 +140,10 @@ static void do_stolen_accounting(void) ...@@ -141,11 +140,10 @@ static void do_stolen_accounting(void)
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
__get_cpu_var(residual_stolen) = stolen; __get_cpu_var(residual_stolen) = stolen;
account_steal_time(NULL, ticks); account_steal_ticks(ticks);
/* Add the appropriate number of ticks of blocked time, /* Add the appropriate number of ticks of blocked time,
including any left-overs from last time. Passing idle to including any left-overs from last time. */
account_steal_time accounts the time as idle/wait. */
blocked += __get_cpu_var(residual_blocked); blocked += __get_cpu_var(residual_blocked);
if (blocked < 0) if (blocked < 0)
...@@ -153,7 +151,7 @@ static void do_stolen_accounting(void) ...@@ -153,7 +151,7 @@ static void do_stolen_accounting(void)
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
__get_cpu_var(residual_blocked) = blocked; __get_cpu_var(residual_blocked) = blocked;
account_steal_time(idle_task(smp_processor_id()), ticks); account_idle_ticks(ticks);
} }
/* /*
......
...@@ -632,8 +632,8 @@ do_IRQ (struct pt_regs *regs) ...@@ -632,8 +632,8 @@ do_IRQ (struct pt_regs *regs)
struct pt_regs *old_regs; struct pt_regs *old_regs;
old_regs = set_irq_regs(regs); old_regs = set_irq_regs(regs);
irq_enter();
s390_idle_check(); s390_idle_check();
irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */ /* Serve timer interrupts first. */
clock_comparator_work(); clock_comparator_work();
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/etr.h> #include <asm/etr.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/cio.h> #include <asm/cio.h>
#include <asm/cpu.h>
#include "s390mach.h" #include "s390mach.h"
static struct semaphore m_sem; static struct semaphore m_sem;
...@@ -369,6 +370,8 @@ s390_do_machine_check(struct pt_regs *regs) ...@@ -369,6 +370,8 @@ s390_do_machine_check(struct pt_regs *regs)
lockdep_off(); lockdep_off();
s390_idle_check();
mci = (struct mci *) &S390_lowcore.mcck_interruption_code; mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
mcck = &__get_cpu_var(cpu_mcck); mcck = &__get_cpu_var(cpu_mcck);
umode = user_mode(regs); umode = user_mode(regs);
......
...@@ -79,10 +79,13 @@ static inline unsigned int kstat_irqs(unsigned int irq) ...@@ -79,10 +79,13 @@ static inline unsigned int kstat_irqs(unsigned int irq)
} }
extern unsigned long long task_delta_exec(struct task_struct *); extern unsigned long long task_delta_exec(struct task_struct *);
extern void account_user_time(struct task_struct *, cputime_t); extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_user_time_scaled(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t); extern void account_steal_time(cputime_t);
extern void account_system_time_scaled(struct task_struct *, cputime_t); extern void account_idle_time(cputime_t);
extern void account_steal_time(struct task_struct *, cputime_t);
extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);
#endif /* _LINUX_KERNEL_STAT_H */ #endif /* _LINUX_KERNEL_STAT_H */
...@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout); ...@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout);
extern void cpu_init (void); extern void cpu_init (void);
extern void trap_init(void); extern void trap_init(void);
extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user); extern void update_process_times(int user);
extern void scheduler_tick(void); extern void scheduler_tick(void);
......
...@@ -4150,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p) ...@@ -4150,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p)
* Account user cpu time to a process. * Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to * @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update * @cputime: the cpu time spent in user space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/ */
void account_user_time(struct task_struct *p, cputime_t cputime) void account_user_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{ {
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp; cputime64_t tmp;
/* Add user time to process. */
p->utime = cputime_add(p->utime, cputime); p->utime = cputime_add(p->utime, cputime);
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
account_group_user_time(p, cputime); account_group_user_time(p, cputime);
/* Add user time to cpustat. */ /* Add user time to cpustat. */
...@@ -4173,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime) ...@@ -4173,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
* Account guest cpu time to a process. * Account guest cpu time to a process.
* @p: the process that the cpu time gets accounted to * @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update * @cputime: the cpu time spent in virtual machine since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/ */
static void account_guest_time(struct task_struct *p, cputime_t cputime) static void account_guest_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{ {
cputime64_t tmp; cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
tmp = cputime_to_cputime64(cputime); tmp = cputime_to_cputime64(cputime);
/* Add guest time to process. */
p->utime = cputime_add(p->utime, cputime); p->utime = cputime_add(p->utime, cputime);
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
account_group_user_time(p, cputime); account_group_user_time(p, cputime);
p->gtime = cputime_add(p->gtime, cputime); p->gtime = cputime_add(p->gtime, cputime);
/* Add guest time to cpustat. */
cpustat->user = cputime64_add(cpustat->user, tmp); cpustat->user = cputime64_add(cpustat->user, tmp);
cpustat->guest = cputime64_add(cpustat->guest, tmp); cpustat->guest = cputime64_add(cpustat->guest, tmp);
} }
/*
* Account scaled user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
*/
void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
{
p->utimescaled = cputime_add(p->utimescaled, cputime);
}
/* /*
* Account system cpu time to a process. * Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to * @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count() * @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in kernel space since the last update * @cputime: the cpu time spent in kernel space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/ */
void account_system_time(struct task_struct *p, int hardirq_offset, void account_system_time(struct task_struct *p, int hardirq_offset,
cputime_t cputime) cputime_t cputime, cputime_t cputime_scaled)
{ {
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
struct rq *rq = this_rq();
cputime64_t tmp; cputime64_t tmp;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
account_guest_time(p, cputime); account_guest_time(p, cputime, cputime_scaled);
return; return;
} }
/* Add system time to process. */
p->stime = cputime_add(p->stime, cputime); p->stime = cputime_add(p->stime, cputime);
p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
account_group_system_time(p, cputime); account_group_system_time(p, cputime);
/* Add system time to cpustat. */ /* Add system time to cpustat. */
...@@ -4226,48 +4227,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset, ...@@ -4226,48 +4227,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
cpustat->irq = cputime64_add(cpustat->irq, tmp); cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count()) else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp); cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else if (p != rq->idle)
cpustat->system = cputime64_add(cpustat->system, tmp);
else if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else else
cpustat->idle = cputime64_add(cpustat->idle, tmp); cpustat->system = cputime64_add(cpustat->system, tmp);
/* Account for system time used */ /* Account for system time used */
acct_update_integrals(p); acct_update_integrals(p);
} }
/* /*
* Account scaled system cpu time to a process. * Account for involuntary wait time.
* @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in involuntary wait
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in kernel space since the last update
*/ */
void account_system_time_scaled(struct task_struct *p, cputime_t cputime) void account_steal_time(cputime_t cputime)
{ {
p->stimescaled = cputime_add(p->stimescaled, cputime); struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
cpustat->steal = cputime64_add(cpustat->steal, cputime64);
} }
/* /*
* Account for involuntary wait time. * Account for idle time.
* @p: the process from which the cpu time has been stolen * @cputime: the cpu time spent in idle wait
* @steal: the cpu time spent in involuntary wait
*/ */
void account_steal_time(struct task_struct *p, cputime_t steal) void account_idle_time(cputime_t cputime)
{ {
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp = cputime_to_cputime64(steal); cputime64_t cputime64 = cputime_to_cputime64(cputime);
struct rq *rq = this_rq(); struct rq *rq = this_rq();
if (p == rq->idle) { if (atomic_read(&rq->nr_iowait) > 0)
p->stime = cputime_add(p->stime, steal); cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
if (atomic_read(&rq->nr_iowait) > 0) else
cpustat->iowait = cputime64_add(cpustat->iowait, tmp); cpustat->idle = cputime64_add(cpustat->idle, cputime64);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
} else
cpustat->steal = cputime64_add(cpustat->steal, tmp);
} }
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Account a single tick of cpu time.
* @p: the process that the cpu time gets accounted to
* @user_tick: indicates if the tick is a user or a system tick
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
cputime_t one_jiffy = jiffies_to_cputime(1);
cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
struct rq *rq = this_rq();
if (user_tick)
account_user_time(p, one_jiffy, one_jiffy_scaled);
else if (p != rq->idle)
account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
one_jiffy_scaled);
else
account_idle_time(one_jiffy);
}
/*
* Account multiple ticks of steal time.
* @ticks: number of stolen ticks
*/
void account_steal_ticks(unsigned long ticks)
{
account_steal_time(jiffies_to_cputime(ticks));
}
/*
* Account multiple ticks of idle time.
* @ticks: number of ticks spent idle
*/
void account_idle_ticks(unsigned long ticks)
{
account_idle_time(jiffies_to_cputime(ticks));
}
#endif
/* /*
* Use precise platform statistics if available: * Use precise platform statistics if available:
*/ */
......
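
For tick-based kernels the per-tick flow now lives here rather than in kernel/timer.c (whose copy is deleted at the very end of this diff), and an idle tick finally stops inflating the idle task's stime. A worked example: on a HZ=1000 kernel jiffies_to_cputime(1) is one millisecond, so each timer tick lands in exactly one bucket:

	/* Worked per-tick dispatch on HZ=1000, mirroring
	 * account_process_tick() above; p is the interrupted task. */
	void tick_example(struct task_struct *p, int user_tick)
	{
		cputime_t one_jiffy = jiffies_to_cputime(1);	/* 1 ms */
		cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);

		if (user_tick)			/* p->utime, cpustat->user */
			account_user_time(p, one_jiffy, one_jiffy_scaled);
		else if (p != this_rq()->idle)	/* p->stime, system/irq/softirq */
			account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
					    one_jiffy_scaled);
		else				/* cpustat->idle or ->iowait */
			account_idle_time(one_jiffy);
	}
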
...@@ -419,7 +419,9 @@ void tick_nohz_restart_sched_tick(void) ...@@ -419,7 +419,9 @@ void tick_nohz_restart_sched_tick(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
unsigned long ticks; unsigned long ticks;
#endif
ktime_t now; ktime_t now;
local_irq_disable(); local_irq_disable();
...@@ -441,6 +443,7 @@ void tick_nohz_restart_sched_tick(void) ...@@ -441,6 +443,7 @@ void tick_nohz_restart_sched_tick(void)
tick_do_update_jiffies64(now); tick_do_update_jiffies64(now);
cpumask_clear_cpu(cpu, nohz_cpu_mask); cpumask_clear_cpu(cpu, nohz_cpu_mask);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/* /*
* We stopped the tick in idle. Update process times would miss the * We stopped the tick in idle. Update process times would miss the
* time we slept as update_process_times does only a 1 tick * time we slept as update_process_times does only a 1 tick
...@@ -450,12 +453,9 @@ void tick_nohz_restart_sched_tick(void) ...@@ -450,12 +453,9 @@ void tick_nohz_restart_sched_tick(void)
/* /*
* We might be one off. Do not randomly account a huge number of ticks! * We might be one off. Do not randomly account a huge number of ticks!
*/ */
if (ticks && ticks < LONG_MAX) { if (ticks && ticks < LONG_MAX)
add_preempt_count(HARDIRQ_OFFSET); account_idle_ticks(ticks);
account_system_time(current, HARDIRQ_OFFSET, #endif
jiffies_to_cputime(ticks));
sub_preempt_count(HARDIRQ_OFFSET);
}
touch_softlockup_watchdog(); touch_softlockup_watchdog();
/* /*
......
...@@ -1018,21 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now) ...@@ -1018,21 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now)
} }
#endif #endif
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
cputime_t one_jiffy = jiffies_to_cputime(1);
if (user_tick) {
account_user_time(p, one_jiffy);
account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
} else {
account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
}
}
#endif
/* /*
* Called from the timer interrupt handler to charge one tick to the current * Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system. * process. user_tick is 1 if the tick is user time, 0 for system.
......