Commit fcf190c3 authored by Adrian Hunter, committed by Thomas Gleixner

timekeeping: Make delta calculation overflow safe

Kernel timekeeping is designed to keep the change in cycles (since the last
timer interrupt) below max_cycles, which prevents multiplication overflow
when converting cycles to nanoseconds. However, if timer interrupts stop,
the calculation will eventually overflow.

Add protection against that. In the timekeeping_cycles_to_ns() calculation,
check the delta against max_cycles and fall back to a slower, higher
precision calculation when it is exceeded. In timekeeping_forward_now(),
process the delta in chunks of at most max_cycles.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-18-adrian.hunter@intel.com
parent e809a80a
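
Background note (an illustrative sketch by the editor, not part of the commit): with a made-up clocksource of mult = 2^22 and shift = 24 (0.25 ns per cycle), a plain 64-bit delta * mult wraps once delta reaches roughly 2^42 cycles, which is exactly the situation max_cycles is meant to rule out. Widening the product to 128 bits, the same idea as the kernel's mul_u64_u32_add_u64_shr() used in the diff below, keeps the conversion exact:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up clocksource parameters: mult/2^shift = 0.25 ns per cycle (4 GHz). */
    #define MULT  4194304u          /* 2^22 */
    #define SHIFT 24u

    /* Fast path, safe only for small deltas: 64-bit product. */
    static uint64_t cycles_to_ns_fast(uint64_t delta, uint64_t xtime_nsec)
    {
            return (delta * MULT + xtime_nsec) >> SHIFT;    /* wraps for delta >= 2^42 */
    }

    /* Safe path: widen to 128 bits before shifting, the same idea as
     * mul_u64_u32_add_u64_shr(delta, mult, xtime_nsec, shift). */
    static uint64_t cycles_to_ns_safe(uint64_t delta, uint64_t xtime_nsec)
    {
            unsigned __int128 prod = (unsigned __int128)delta * MULT + xtime_nsec;

            return (uint64_t)(prod >> SHIFT);
    }

    int main(void)
    {
            uint64_t delta = 1ULL << 50;    /* far past the 64-bit overflow point */

            /* The fast result has wrapped (it prints 0 here); the safe one is 2^48 ns. */
            printf("fast: %llu ns\n", (unsigned long long)cycles_to_ns_fast(delta, 0));
            printf("safe: %llu ns\n", (unsigned long long)cycles_to_ns_safe(delta, 0));
            return 0;
    }

The patch only takes the slower path when delta actually exceeds max_cycles, so the common case remains a single 64-bit multiply.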
@@ -364,19 +364,32 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 }
 
 /* Timekeeper helper functions. */
+static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
+{
+	return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
+}
+
 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
 	/* Calculate the delta since the last update_wall_time() */
 	u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 
-	if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
-		/*
-		 * Handle clocksource inconsistency between CPUs to prevent
-		 * time from going backwards by checking for the MSB of the
-		 * mask being set in the delta.
-		 */
-		if (unlikely(delta & ~(mask >> 1)))
-			return tkr->xtime_nsec >> tkr->shift;
+	/*
+	 * This detects the case where the delta overflows the multiplication
+	 * with tkr->mult.
+	 */
+	if (unlikely(delta > tkr->clock->max_cycles)) {
+		if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
+			/*
+			 * Handle clocksource inconsistency between CPUs to prevent
+			 * time from going backwards by checking for the MSB of the
+			 * mask being set in the delta.
+			 */
+			if (unlikely(delta & ~(mask >> 1)))
+				return tkr->xtime_nsec >> tkr->shift;
+		}
+
+		return delta_to_ns_safe(tkr, delta);
 	}
 
 	return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
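
An editorial aside on the hunk above (not part of the commit): the new delta > tkr->clock->max_cycles test does double duty. Deltas that are merely too large for the 64-bit multiply are still converted, just via the slower delta_to_ns_safe() path, while, when CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE is enabled, a delta with the top bit of the mask set is treated as the clock appearing to run backwards between CPUs and is clamped to the last value. A hypothetical standalone model of that three-way decision (the struct and enum names are invented for the sketch):

    #include <stdint.h>

    /* Invented stand-in for the tk_read_base fields the check relies on. */
    struct tkr_model {
            uint64_t mask;          /* counter width mask, e.g. a 56-bit clocksource */
            uint64_t max_cycles;    /* upper bound on deltas safe to multiply in 64 bits */
    };

    enum delta_path { PATH_FAST, PATH_CLAMP, PATH_SAFE };

    /*
     * Mirror of the branch structure in timekeeping_cycles_to_ns() above,
     * assuming CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y.
     */
    enum delta_path classify_delta(const struct tkr_model *tkr, uint64_t delta)
    {
            if (delta <= tkr->max_cycles)
                    return PATH_FAST;   /* 64-bit multiply is exact */
            if (delta & ~(tkr->mask >> 1))
                    return PATH_CLAMP;  /* MSB set: time went backwards, return last ns */
            return PATH_SAFE;           /* genuinely large delta: 128-bit multiply */
    }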
@@ -789,10 +802,15 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last = cycle_now;
 
-	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
-	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+	while (delta > 0) {
+		u64 max = tk->tkr_mono.clock->max_cycles;
+		u64 incr = delta < max ? delta : max;
 
-	tk_normalize_xtime(tk);
+		tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
+		tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
+		tk_normalize_xtime(tk);
+		delta -= incr;
+	}
 }
 
 /**
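
A final illustrative sketch (again not part of the commit; the constants are invented): the loop added to timekeeping_forward_now() works because each incr is capped at max_cycles, so incr * mult can no longer overflow, and normalizing after every chunk keeps the accumulated shifted nanoseconds bounded. In the kernel that normalization is tk_normalize_xtime(), which moves whole seconds out of xtime_nsec; the sketch below moves whole nanoseconds out of a small fractional accumulator, the same idea at a smaller scale, and checks the result against a direct 128-bit conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented clocksource parameters; MAX_CYCLES is deliberately conservative
     * so that frac + incr * MULT below can never wrap a 64-bit value. */
    #define MULT       4194304u                 /* 2^22 */
    #define SHIFT      24u
    #define MAX_CYCLES ((UINT64_MAX / MULT) / 2)

    int main(void)
    {
            const uint64_t total = 1ULL << 50;  /* cycles to forward, far beyond MAX_CYCLES */
            uint64_t delta = total;
            uint64_t nsec = 0;                  /* whole nanoseconds accumulated so far */
            uint64_t frac = 0;                  /* shifted remainder, like xtime_nsec */

            while (delta > 0) {
                    uint64_t incr = delta < MAX_CYCLES ? delta : MAX_CYCLES;

                    frac += incr * MULT;        /* safe: incr <= MAX_CYCLES */
                    nsec += frac >> SHIFT;      /* "normalize": move whole ns out of frac */
                    frac &= (1ULL << SHIFT) - 1;
                    delta -= incr;
            }

            /* Reference result computed in one 128-bit step. */
            uint64_t ref = (uint64_t)(((unsigned __int128)total * MULT) >> SHIFT);

            printf("chunked: %llu ns, direct: %llu ns\n",
                   (unsigned long long)nsec, (unsigned long long)ref);
            return 0;
    }

Both lines print the same value (2^48 ns for these made-up numbers), which is the point of processing the delta in bounded chunks.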