Commit 45a7905f authored by Will Deacon's avatar Will Deacon Committed by Catalin Marinas

arm64: vdso: defer shifting of nanosecond component of timespec

Shifting the nanosecond component of the computed timespec early can
lead to sub-ns inaccuracies when using the truncated value as input to
further arithmetic for things like conversions to monotonic time.

This patch defers the timespec shifting until after the final value has
been computed.
Reported-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent d91fb5c2
@@ -239,7 +239,7 @@ void update_vsyscall(struct timekeeper *tk)
 	if (!use_syscall) {
 		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
-		vdso_data->xtime_clock_nsec	= tk->xtime_nsec >> tk->shift;
+		vdso_data->xtime_clock_nsec	= tk->xtime_nsec;
 		vdso_data->cs_mult		= tk->mult;
 		vdso_data->cs_shift		= tk->shift;
 		vdso_data->wtm_clock_sec	= tk->wall_to_monotonic.tv_sec;
@@ -66,6 +66,7 @@ ENTRY(__kernel_gettimeofday)
 	/* Convert ns to us. */
 	mov	x13, #1000
+	lsl	x13, x13, x12
 	udiv	x11, x11, x13
 	stp	x10, x11, [x0, #TVAL_TV_SEC]
2:
@@ -136,11 +137,13 @@ ENTRY(__kernel_clock_gettime)
4:
 	/* Add on wtm timespec. */
 	add	x10, x10, x13
+	lsl	x14, x14, x12
 	add	x11, x11, x14

 	/* Normalise the new timespec. */
 	mov	x15, #NSEC_PER_SEC_LO16
 	movk	x15, #NSEC_PER_SEC_HI16, lsl #16
+	lsl	x15, x15, x12
 	cmp	x11, x15
 	b.lt	5f
 	sub	x11, x11, x15
@@ -152,6 +155,7 @@ ENTRY(__kernel_clock_gettime)
 	sub	x10, x10, #1
6:	/* Store to the user timespec. */
+	lsr	x11, x11, x12
 	stp	x10, x11, [x1, #TSPEC_TV_SEC]
 	mov	x0, xzr
 	ret	x2
@@ -204,7 +208,7 @@ ENDPROC(__kernel_clock_getres)
  * Clobbers the temporary registers (x9 - x15).
  * Returns:
  * - w9 = vDSO sequence counter
- * - (x10, x11) = (ts->tv_sec, ts->tv_nsec)
+ * - (x10, x11) = (ts->tv_sec, shifted ts->tv_nsec)
  * - w12 = cs_shift
  */
ENTRY(__do_get_tspec)
@@ -226,11 +230,11 @@ ENTRY(__do_get_tspec)
 	movn	x15, #0xff00, lsl #48
 	and	x10, x15, x10
 	mul	x10, x10, x11
-	lsr	x10, x10, x12

 	/* Use the kernel time to calculate the new timespec. */
 	mov	x11, #NSEC_PER_SEC_LO16
 	movk	x11, #NSEC_PER_SEC_HI16, lsl #16
+	lsl	x11, x11, x12
 	add	x15, x10, x14
 	udiv	x14, x15, x11
 	add	x10, x13, x14
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment