Commit 650ea024 authored by John Stultz

time: Convert x86_64 to using new update_vsyscall

Switch x86_64 to the sub-ns precise vsyscall update path (GENERIC_TIME_VSYSCALL), replacing the update_vsyscall_old() interface.
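
The sub-ns precision comes from keeping nanoseconds left-shifted by the clocksource shift (the xtime_nsec and *_snsec values in the diff below), so the fractional nanoseconds of each mult/shift conversion are carried along instead of being truncated on every update. A minimal userspace sketch of that arithmetic follows; the shift and the two sample values are made up purely for illustration.

/* Illustration only: "snsec" means nanoseconds stored as (ns << shift). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t shift = 4;          /* made-up shift; real ones come from the clocksource */
	const uint64_t base_snsec = 170;   /* 10.625 ns kept in shifted form */
	const uint64_t delta_scaled = 90;  /* cycles * mult, i.e. 5.625 ns in shifted form */

	/* Old path: base and delta are each rounded down to whole ns separately. */
	uint64_t ns_old = (base_snsec >> shift) + (delta_scaled >> shift);  /* 10 + 5 = 15 */

	/* New path: stay in shifted form and round down only once at the end. */
	uint64_t ns_new = (base_snsec + delta_scaled) >> shift;             /* 260 >> 4 = 16 */

	printf("old = %llu ns, new = %llu ns (exact value is 16.25 ns)\n",
	       (unsigned long long)ns_old, (unsigned long long)ns_new);
	return 0;
}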

Cc: Tony Luck <tony.luck@intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
parent 92bb1fcf
@@ -93,7 +93,7 @@ config X86
 	select GENERIC_CLOCKEVENTS
 	select ARCH_CLOCKSOURCE_DATA if X86_64
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
-	select GENERIC_TIME_VSYSCALL_OLD if X86_64
+	select GENERIC_TIME_VSYSCALL if X86_64
 	select KTIME_SCALAR if X86_32
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER

@@ -17,8 +17,8 @@ struct vsyscall_gtod_data {

 	/* open coded 'struct timespec' */
 	time_t		wall_time_sec;
-	u32		wall_time_nsec;
-	u32		monotonic_time_nsec;
+	u64		wall_time_snsec;
+	u64		monotonic_time_snsec;
 	time_t		monotonic_time_sec;

 	struct timezone sys_tz;
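
The fields also widen from u32 to u64 because a shifted nanosecond value can reach NSEC_PER_SEC << shift, which overflows 32 bits for any shift of 3 or more. A short check, using an arbitrary example shift:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t shift = 10;	/* arbitrary example shift */
	uint64_t max_snsec = (uint64_t)1000000000 << shift;	/* NSEC_PER_SEC << shift */

	printf("NSEC_PER_SEC << %" PRIu32 " = %" PRIu64 " (UINT32_MAX = %" PRIu32 ")\n",
	       shift, max_snsec, (uint32_t)UINT32_MAX);
	return 0;
}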
@@ -82,32 +82,41 @@ void update_vsyscall_tz(void)
 	vsyscall_gtod_data.sys_tz = sys_tz;
 }

-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec monotonic;
+	struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;

-	write_seqcount_begin(&vsyscall_gtod_data.seq);
+	write_seqcount_begin(&vdata->seq);

 	/* copy vsyscall data */
-	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
-	vsyscall_gtod_data.clock.cycle_last	= clock->cycle_last;
-	vsyscall_gtod_data.clock.mask		= clock->mask;
-	vsyscall_gtod_data.clock.mult		= mult;
-	vsyscall_gtod_data.clock.shift		= clock->shift;
-
-	vsyscall_gtod_data.wall_time_sec	= wall_time->tv_sec;
-	vsyscall_gtod_data.wall_time_nsec	= wall_time->tv_nsec;
+	vdata->clock.vclock_mode	= tk->clock->archdata.vclock_mode;
+	vdata->clock.cycle_last		= tk->clock->cycle_last;
+	vdata->clock.mask		= tk->clock->mask;
+	vdata->clock.mult		= tk->mult;
+	vdata->clock.shift		= tk->shift;
+
+	vdata->wall_time_sec		= tk->xtime_sec;
+	vdata->wall_time_snsec		= tk->xtime_nsec;
+
+	vdata->monotonic_time_sec	= tk->xtime_sec
+					+ tk->wall_to_monotonic.tv_sec;
+	vdata->monotonic_time_snsec	= tk->xtime_nsec
+					+ (tk->wall_to_monotonic.tv_nsec
+						<< tk->shift);
+	while (vdata->monotonic_time_snsec >=
+					(((u64)NSEC_PER_SEC) << tk->shift)) {
+		vdata->monotonic_time_snsec -=
+					((u64)NSEC_PER_SEC) << tk->shift;
+		vdata->monotonic_time_sec++;
+	}

-	monotonic = timespec_add(*wall_time, *wtm);
-	vsyscall_gtod_data.monotonic_time_sec	= monotonic.tv_sec;
-	vsyscall_gtod_data.monotonic_time_nsec	= monotonic.tv_nsec;
+	vdata->wall_time_coarse.tv_sec	= tk->xtime_sec;
+	vdata->wall_time_coarse.tv_nsec	= (long)(tk->xtime_nsec >> tk->shift);

-	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
-	vsyscall_gtod_data.monotonic_time_coarse =
-		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
+	vdata->monotonic_time_coarse	= timespec_add(vdata->wall_time_coarse,
+							tk->wall_to_monotonic);

-	write_seqcount_end(&vsyscall_gtod_data.seq);
+	write_seqcount_end(&vdata->seq);
 }

 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
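
The monotonic base above is assembled directly in shifted form: wall_to_monotonic.tv_nsec is shifted up, added to xtime_nsec, and the while loop then carries whole seconds out so the shifted-nanosecond field stays below NSEC_PER_SEC << tk->shift. A self-contained sketch of that normalization, with made-up inputs rather than real timekeeper values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	const uint32_t shift = 8;	/* made-up shift for the example */

	/* Made-up wall time and wall-to-monotonic offset. */
	uint64_t xtime_sec = 1000;
	uint64_t xtime_snsec = 900000000ULL << shift;	/* 0.9 s, shifted */
	uint64_t wtm_sec = 4;
	uint64_t wtm_nsec = 300000000;			/* 0.3 s offset */

	/* Same construction as update_vsyscall(): sum seconds and shifted ns... */
	uint64_t mono_sec = xtime_sec + wtm_sec;
	uint64_t mono_snsec = xtime_snsec + (wtm_nsec << shift);

	/* ...then carry whole seconds out of the shifted-ns accumulator. */
	while (mono_snsec >= (NSEC_PER_SEC << shift)) {
		mono_snsec -= NSEC_PER_SEC << shift;
		mono_sec++;
	}

	/* Prints "monotonic = 1005 s + 200000000 ns" (0.9 s + 0.3 s carried). */
	printf("monotonic = %llu s + %llu ns\n",
	       (unsigned long long)mono_sec,
	       (unsigned long long)(mono_snsec >> shift));
	return 0;
}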
@@ -80,7 +80,7 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 }

-notrace static inline long vgetns(void)
+notrace static inline u64 vgetsns(void)
 {
 	long v;
 	cycles_t cycles;
@@ -91,21 +91,24 @@ notrace static inline long vgetns(void)
 	else
 		return 0;
 	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
-	return (v * gtod->clock.mult) >> gtod->clock.shift;
+	return v * gtod->clock.mult;
 }

 /* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
 notrace static int __always_inline do_realtime(struct timespec *ts)
 {
-	unsigned long seq, ns;
+	unsigned long seq;
+	u64 ns;
 	int mode;

+	ts->tv_nsec = 0;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
-		ts->tv_nsec = gtod->wall_time_nsec;
-		ns = vgetns();
+		ns = gtod->wall_time_snsec;
+		ns += vgetsns();
+		ns >>= gtod->clock.shift;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

 	timespec_add_ns(ts, ns);
@@ -114,15 +117,18 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
 notrace static int do_monotonic(struct timespec *ts)
 {
-	unsigned long seq, ns;
+	unsigned long seq;
+	u64 ns;
 	int mode;

+	ts->tv_nsec = 0;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
-		ts->tv_nsec = gtod->monotonic_time_nsec;
-		ns = vgetns();
+		ns = gtod->monotonic_time_snsec;
+		ns += vgetsns();
+		ns >>= gtod->clock.shift;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

 	timespec_add_ns(ts, ns);
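
On the reader side, vgetsns() now returns the scaled cycle delta (cycles * mult) without shifting it down, and do_realtime()/do_monotonic() add it to the stored shifted-nanosecond base before doing a single >> shift, so the base's fractional nanoseconds take part in the final rounding. A userspace sketch of that read path, with made-up gtod values, a stub instead of a real TSC/HPET read, and the seqcount retry loop omitted:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Made-up stand-ins for the vsyscall_gtod_data fields (not real values). */
static const uint64_t cycle_last = 1000000;
static const uint64_t mask = ~0ULL;
static const uint32_t mult = 100;
static const uint32_t shift = 6;
static const time_t   wall_time_sec = 1350000000;
static const uint64_t wall_time_snsec = (123456789ULL << 6) | 37;	/* ns << shift, plus a fraction */

static uint64_t read_cycles(void)
{
	return 1000640;	/* pretend counter readout: 640 cycles past cycle_last */
}

/* Counterpart of the new vgetsns(): scaled delta, not yet shifted down. */
static uint64_t vgetsns(void)
{
	uint64_t v = (read_cycles() - cycle_last) & mask;
	return v * mult;
}

int main(void)
{
	struct timespec ts = { .tv_sec = wall_time_sec, .tv_nsec = 0 };

	/* Counterpart of do_realtime(): shifted base + scaled delta, one shift. */
	uint64_t ns = wall_time_snsec;
	ns += vgetsns();
	ns >>= shift;

	ts.tv_sec += ns / 1000000000;		/* what timespec_add_ns() does */
	ts.tv_nsec = ns % 1000000000;

	printf("%lld.%09ld\n", (long long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}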