Commit 70293240 authored by Linus Torvalds

Merge tag 'timers-urgent-2024-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 "Two regression fixes for the timer and timer migration code:

   - Prevent endless timer requeuing caused by two CPUs racing out of
     idle. This happens when the last CPU goes idle and therefore has to
     ensure that the pending global timers are expired, while some other
     CPU comes out of idle at the same time, wins the race and expires
     the global queue. This causes the last CPU to chase ghost timers
     forever and to reprogram its clockevent device endlessly.

     Cure this by re-evaluating the wakeup time unconditionally.

   - The split into local (pinned) and global timers in the timer wheel
     caused a regression for NOHZ full as it broke the idle tracking of
     global timers. On NOHZ full this prevents a self-IPI from being
     sent, which in turn causes the timer not to be programmed and not
     to expire on time.

      Restore the idle tracking for the global timer base so that the
      self-IPI condition for NOHZ full works correctly again"
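
A minimal stand-alone C sketch of the first fix's idea, assuming simplified types: a CPU that lost the race out of idle must not bail out while it still carries a stale wakeup value, but re-evaluate it against what is actually pending. All names below (cpu_state, handle_remote_model, next_global_expiry) are invented for illustration; only KTIME_MAX and the early-return condition mirror the tmigr_handle_remote() hunk in the diff further down.

/*
 * Stand-alone model only: cpu_state, handle_remote_model and
 * next_global_expiry are invented names, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define KTIME_MAX INT64_MAX

struct cpu_state {
	bool	is_migrator;	/* does this CPU own the global queue right now? */
	int64_t	wakeup;		/* next remote expiry this CPU armed, KTIME_MAX if none */
};

/* What is really pending; KTIME_MAX when another CPU already expired everything. */
static int64_t next_global_expiry = KTIME_MAX;

static void handle_remote_model(struct cpu_state *tmc)
{
	/*
	 * Pre-fix behaviour: a non-migrator returned here unconditionally,
	 * keeping a stale wakeup and reprogramming its clockevent device
	 * for a timer that no longer exists.
	 */
	if (!tmc->is_migrator && tmc->wakeup == KTIME_MAX)
		return;

	/* Re-evaluate unconditionally: adopt the real next expiry, or clear it. */
	tmc->wakeup = next_global_expiry;
}

int main(void)
{
	/* CPU lost the race out of idle: not the migrator, but a stale wakeup is left over. */
	struct cpu_state cpu = { .is_migrator = false, .wakeup = 1000 };

	handle_remote_model(&cpu);
	printf("wakeup after re-evaluation: %s\n",
	       cpu.wakeup == KTIME_MAX ? "cleared, no requeue" : "still armed");
	return 0;
}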

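A similar stand-alone sketch of the second fix's idea: the enqueue path only sends the (self-)IPI when the target base is marked idle, so a nohz_full CPU whose global base is never marked idle silently loses that IPI. The names below (timer_base_model, enqueue_global_model, wake_up_cpu_model) are invented and merely stand in for the real trigger_dyntick_cpu()/wake_up_nohz_cpu() path shown in the timer.c hunks further down.

/*
 * Stand-alone model only: timer_base_model, enqueue_global_model and
 * wake_up_cpu_model are invented names, not the kernel API.
 */
#include <stdio.h>
#include <stdbool.h>

struct timer_base_model {
	bool is_idle;	/* tick is stopped for this base */
};

static int ipis_sent;

/* Stands in for wake_up_nohz_cpu(): force the CPU to reprogram its tick. */
static void wake_up_cpu_model(void)
{
	ipis_sent++;
}

/* Mirrors the gate in trigger_dyntick_cpu(): no idle mark, no (self-)IPI. */
static void enqueue_global_model(struct timer_base_model *global)
{
	if (global->is_idle)
		wake_up_cpu_model();
}

int main(void)
{
	struct timer_base_model global = { .is_idle = false };

	/* Regression: global base never marked idle on nohz_full, the IPI is lost. */
	enqueue_global_model(&global);
	printf("without idle tracking: %d IPI(s)\n", ipis_sent);

	/* Restored idle tracking: the enqueue now kicks the self-IPI. */
	global.is_idle = true;
	enqueue_global_model(&global);
	printf("with idle tracking:    %d IPI(s)\n", ipis_sent);
	return 0;
}
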
* tag 'timers-urgent-2024-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timers: Fix removed self-IPI on global timer's enqueue in nohz_full
  timers/migration: Fix endless timer requeue after idle interrupts
parents 00164f47 03877039
@@ -642,7 +642,8 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 	 * the base lock:
 	 */
 	if (base->is_idle) {
-		WARN_ON_ONCE(!(timer->flags & TIMER_PINNED));
+		WARN_ON_ONCE(!(timer->flags & TIMER_PINNED ||
+			       tick_nohz_full_cpu(base->cpu)));
 		wake_up_nohz_cpu(base->cpu);
 	}
 }
@@ -2292,6 +2293,13 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	 */
 	if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
 		base_local->is_idle = true;
+		/*
+		 * Global timers queued locally while running in a task
+		 * in nohz_full mode need a self-IPI to kick reprogramming
+		 * in IRQ tail.
+		 */
+		if (tick_nohz_full_cpu(base_local->cpu))
+			base_global->is_idle = true;
 		trace_timer_base_idle(true, base_local->cpu);
 	}
 	*idle = base_local->is_idle;
@@ -2364,6 +2372,8 @@ void timer_clear_idle(void)
 	 * path. Required for BASE_LOCAL only.
 	 */
 	__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
+	if (tick_nohz_full_cpu(smp_processor_id()))
+		__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
 	trace_timer_base_idle(false, smp_processor_id());

 	/* Activate without holding the timer_base->lock */
...
@@ -1038,8 +1038,15 @@ void tmigr_handle_remote(void)
 	 * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
 	 * return when nothing has to be done.
 	 */
-	if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask))
-		return;
+	if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
+		/*
+		 * If this CPU was an idle migrator, make sure to clear its wakeup
+		 * value so it won't chase timers that have already expired elsewhere.
+		 * This avoids endless requeue from tmigr_new_timer().
+		 */
+		if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
+			return;
+	}

 	data.now = get_jiffies_update(&data.basej);
...