Commit 99f92594 authored by Linus Torvalds

Merge tag 'sched-urgent-2021-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - Fix performance regression caused by lack of intended batching of
     RCU callbacks by over-eager NOHZ-full code.

   - Fix cgroups related corruption of load_avg and load_sum metrics.

   - Three fixes to fix blocked load, util_sum/runnable_sum and util_est
     tracking bugs"

* tag 'sched-urgent-2021-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix util_est UTIL_AVG_UNCHANGED handling
  sched/pelt: Ensure that *_sum is always synced with *_avg
  tick/nohz: Only check for RCU deferred wakeup on user/guest entry when needed
  sched/fair: Make sure to update tg contrib for blocked load
  sched/fair: Keep load_avg and load_sum synced
parents 191aaf6c 68d7a190
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
 	lockdep_assert_irqs_disabled();
-	rcu_nocb_flush_deferred_wakeup();
+	tick_nohz_user_enter_prepare();
 }
 /**
...
@@ -350,11 +350,19 @@ struct load_weight {
  * Only for tasks we track a moving average of the past instantaneous
  * estimated utilization. This allows to absorb sporadic drops in utilization
  * of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est.enqueued at dequeue
+ * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
+ * for a task) it is safe to use MSB.
  */
 struct util_est {
 	unsigned int enqueued;
 	unsigned int ewma;
 #define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
 } __attribute__((__aligned__(sizeof(u64))));
 /*
...
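
Editorial aside, not part of the commit: a minimal user-space sketch of the flag scheme described in the comment above, showing how a utilization value and the UTIL_AVG_UNCHANGED MSB share one 32-bit field and how a reader masks the flag off before using the value. The mark_unchanged()/read_util() helpers are invented for this example.

#include <assert.h>
#include <stdio.h>

#define UTIL_AVG_UNCHANGED 0x80000000u	/* MSB, as in the new sched.h define */

/* Mark a stored utilization as "util_avg unchanged since dequeue". */
static unsigned int mark_unchanged(unsigned int enqueued)
{
	return enqueued | UTIL_AVG_UNCHANGED;
}

/* Readers strip the flag before using the value (cf. _task_util_est()). */
static unsigned int read_util(unsigned int enqueued)
{
	return enqueued & ~UTIL_AVG_UNCHANGED;
}

int main(void)
{
	unsigned int enqueued = mark_unchanged(1024);	/* max task util_avg is 1024 */

	assert(enqueued & UTIL_AVG_UNCHANGED);		/* flag is set ... */
	assert(read_util(enqueued) == 1024);		/* ... and the value is intact */
	printf("stored=0x%x value=%u\n", enqueued, read_util(enqueued));
	return 0;
}

Because a task's util_avg never exceeds 1024, setting bit 31 can never collide with a real utilization value, which is why the commit can move the flag from the LSB to the MSB.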
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
 		__tick_nohz_task_switch();
 }
+static inline void tick_nohz_user_enter_prepare(void)
+{
+	if (tick_nohz_full_cpu(smp_processor_id()))
+		rcu_nocb_flush_deferred_wakeup();
+}
 #endif
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 #include "common.h"
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 		local_irq_disable_exit_to_user();
 		/* Check if any of the above work has queued a deferred wakeup */
-		rcu_nocb_flush_deferred_wakeup();
+		tick_nohz_user_enter_prepare();
 		ti_work = READ_ONCE(current_thread_info()->flags);
 	}
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
 	lockdep_assert_irqs_disabled();
 	/* Flush pending rcuog wakeup before the last need_resched() check */
-	rcu_nocb_flush_deferred_wakeup();
+	tick_nohz_user_enter_prepare();
 	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
 		ti_work = exit_to_user_mode_loop(regs, ti_work);
...
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
 #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 #define __P(F) __PS(#F, F)
 #define P(F) __PS(#F, p->F)
+#define PM(F, M) __PS(#F, p->F & (M))
 #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 #define __PN(F) __PSN(#F, F)
 #define PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 	P(se.avg.util_est.ewma);
-	P(se.avg.util_est.enqueued);
+	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
 	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
...
@@ -3499,10 +3499,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
 	unsigned long load_avg;
 	u64 load_sum = 0;
-	s64 delta_sum;
 	u32 divider;
 	if (!runnable_sum)
@@ -3549,13 +3548,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	load_sum = (s64)se_weight(se) * runnable_sum;
 	load_avg = div_s64(load_sum, divider);
-	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-	delta_avg = load_avg - se->avg.load_avg;
+	delta = load_avg - se->avg.load_avg;
 	se->avg.load_sum = runnable_sum;
 	se->avg.load_avg = load_avg;
-	add_positive(&cfs_rq->avg.load_avg, delta_avg);
-	add_positive(&cfs_rq->avg.load_sum, delta_sum);
+	add_positive(&cfs_rq->avg.load_avg, delta);
+	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3765,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	u32 divider = get_pelt_divider(&cfs_rq->avg);
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
@@ -3902,7 +3907,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 {
 	struct util_est ue = READ_ONCE(p->se.avg.util_est);
-	return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+	return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
 }
 static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4007,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 	 * Reset EWMA on utilization increases, the moving average is used only
 	 * to smooth utilization decreases.
 	 */
-	ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+	ue.enqueued = task_util(p);
 	if (sched_feat(UTIL_EST_FASTUP)) {
 		if (ue.ewma < ue.enqueued) {
 			ue.ewma = ue.enqueued;
@@ -4051,6 +4056,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 	ue.ewma += last_ewma_diff;
 	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
+	ue.enqueued |= UTIL_AVG_UNCHANGED;
 	WRITE_ONCE(p->se.avg.util_est, ue);
 	trace_sched_util_est_se_tp(&p->se);
@@ -8030,7 +8036,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 		/* Propagate pending load changes to the parent, if any: */
 		se = cfs_rq->tg->se[cpu];
 		if (se && !skip_blocked_update(se))
-			update_load_avg(cfs_rq_of(se), se, 0);
+			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 		/*
 		 * There can be a lot of idle CPU cgroups. Don't let fully
...
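
Editorial aside, not part of the commit: a stand-alone sketch of the resync pattern the hunks above apply. Instead of subtracting a *_sum contribution that may have decayed out of step with *_avg, the sum is recomputed as avg * divider after the avg has been updated, so the two values cannot drift apart. struct avg, sub_positive() and detach() below are simplified stand-ins for the kernel's cfs_rq/sched_avg code, not the actual implementation.

#include <stdio.h>

/* Simplified stand-in for the kernel's sched_avg fields. */
struct avg {
	unsigned long util_avg;
	unsigned long long util_sum;
};

/* sub_positive()-like helper: subtract but never underflow. */
static void sub_positive(unsigned long *val, unsigned long sub)
{
	*val = (*val > sub) ? *val - sub : 0;
}

/*
 * Detach one entity's contribution: subtract from the average, then
 * resync the sum from the average rather than subtracting a sum that
 * may have decayed at a different point in time.
 */
static void detach(struct avg *rq, const struct avg *se, unsigned int divider)
{
	sub_positive(&rq->util_avg, se->util_avg);
	rq->util_sum = (unsigned long long)rq->util_avg * divider;
}

int main(void)
{
	unsigned int divider = 47742;	/* roughly LOAD_AVG_MAX - 1024 + period_contrib */
	struct avg rq = { .util_avg = 300, .util_sum = 300ULL * 47742 };
	struct avg se = { .util_avg = 120, .util_sum = 120ULL * 47742 + 5000 }; /* slightly stale sum */

	detach(&rq, &se, divider);
	printf("util_avg=%lu util_sum=%llu\n", rq.util_avg, rq.util_sum);
	return 0;
}

The same idea is used twice in the diff: detach_entity_load_avg() resyncs util_sum and runnable_sum from their averages, and update_tg_cfs_load() resyncs cfs_rq load_sum from load_avg.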
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
 }
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
 	unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
 	if (!sched_feat(UTIL_EST))
 		return;
-	/* Avoid store if the flag has been already set */
+	/* Avoid store if the flag has been already reset */
 	enqueued = avg->util_est.enqueued;
 	if (!(enqueued & UTIL_AVG_UNCHANGED))
 		return;
...
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
 EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;
...