Commit 85e511df authored by Peter Zijlstra's avatar Peter Zijlstra

sched/eevdf: Allow shorter slices to wakeup-preempt

Part of the reason to have shorter slices is to improve
responsiveness. Allow shorter slices to preempt longer slices on
wakeup.

    Task                  |   Runtime ms  | Switches | Avg delay ms    | Max delay ms    | Sum delay ms     |

  100ms massive_intr 500us cyclictest NO_PREEMPT_SHORT

  1 massive_intr:(5)      | 846018.956 ms |   779188 | avg:   0.273 ms | max:  58.337 ms | sum:212545.245 ms |
  2 massive_intr:(5)      | 853450.693 ms |   792269 | avg:   0.275 ms | max:  71.193 ms | sum:218263.588 ms |
  3 massive_intr:(5)      | 843888.920 ms |   771456 | avg:   0.277 ms | max:  92.405 ms | sum:213353.221 ms |
  1 chromium-browse:(8)   |  53015.889 ms |   131766 | avg:   0.463 ms | max:  36.341 ms | sum:60959.230  ms |
  2 chromium-browse:(8)   |  53864.088 ms |   136962 | avg:   0.480 ms | max:  27.091 ms | sum:65687.681  ms |
  3 chromium-browse:(9)   |  53637.904 ms |   132637 | avg:   0.481 ms | max:  24.756 ms | sum:63781.673  ms |
  1 cyclictest:(5)        |  12615.604 ms |   639689 | avg:   0.471 ms | max:  32.272 ms | sum:301351.094 ms |
  2 cyclictest:(5)        |  12511.583 ms |   642578 | avg:   0.448 ms | max:  44.243 ms | sum:287632.830 ms |
  3 cyclictest:(5)        |  12545.867 ms |   635953 | avg:   0.475 ms | max:  25.530 ms | sum:302374.658 ms |

  100ms massive_intr 500us cyclictest PREEMPT_SHORT

  1 massive_intr:(5)      | 839843.919 ms |   837384 | avg:   0.264 ms | max:  74.366 ms | sum:221476.885 ms |
  2 massive_intr:(5)      | 852449.913 ms |   845086 | avg:   0.252 ms | max:  68.162 ms | sum:212595.968 ms |
  3 massive_intr:(5)      | 839180.725 ms |   836883 | avg:   0.266 ms | max:  69.742 ms | sum:222812.038 ms |
  1 chromium-browse:(11)  |  54591.481 ms |   138388 | avg:   0.458 ms | max:  35.427 ms | sum:63401.508  ms |
  2 chromium-browse:(8)   |  52034.541 ms |   132276 | avg:   0.436 ms | max:  31.826 ms | sum:57732.958  ms |
  3 chromium-browse:(8)   |  55231.771 ms |   141892 | avg:   0.469 ms | max:  27.607 ms | sum:66538.697  ms |
  1 cyclictest:(5)        |  13156.391 ms |   667412 | avg:   0.373 ms | max:  38.247 ms | sum:249174.502 ms |
  2 cyclictest:(5)        |  12688.939 ms |   665144 | avg:   0.374 ms | max:  33.548 ms | sum:248509.392 ms |
  3 cyclictest:(5)        |  13475.623 ms |   669110 | avg:   0.370 ms | max:  37.819 ms | sum:247673.390 ms |

As per the numbers, this makes cyclictest's (short slice) max-delay
more consistent, and that consistency drops the sum-delay. The
trade-off is that massive_intr (long slice) gets more context
switches and a slight increase in sum-delay.

Chunxin contributed did_preempt_short(), whereby a task that lost slice
protection from PREEMPT_SHORT gets rescheduled once it becomes
ineligible.

[mike: numbers]
Co-Developed-by: default avatarChunxin Zang <zangchunxin@lixiang.com>
Signed-off-by: default avatarChunxin Zang <zangchunxin@lixiang.com>
Signed-off-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: default avatarValentin Schneider <vschneid@redhat.com>
Tested-by: default avatarMike Galbraith <umgwanakikbuti@gmail.com>
Link: https://lkml.kernel.org/r/20240727105030.735459544@infradead.org
parent 82e9d045
...@@ -973,10 +973,10 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se); ...@@ -973,10 +973,10 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
* XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
* this is probably good enough. * this is probably good enough.
*/ */
static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{ {
if ((s64)(se->vruntime - se->deadline) < 0) if ((s64)(se->vruntime - se->deadline) < 0)
return; return false;
/* /*
* For EEVDF the virtual time slope is determined by w_i (iow. * For EEVDF the virtual time slope is determined by w_i (iow.
...@@ -993,10 +993,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) ...@@ -993,10 +993,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
/* /*
* The task has consumed its request, reschedule. * The task has consumed its request, reschedule.
*/ */
if (cfs_rq->nr_running > 1) { return true;
resched_curr(rq_of(cfs_rq));
clear_buddies(cfs_rq, se);
}
} }
#include "pelt.h" #include "pelt.h"
...@@ -1134,6 +1131,38 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec) ...@@ -1134,6 +1131,38 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
dl_server_update(p->dl_server, delta_exec); dl_server_update(p->dl_server, delta_exec);
} }
/*
 * Did a short-slice wakeup already revoke current's slice protection?
 * vlag == deadline is used as the sentinel for "protection still intact";
 * once it differs, reschedule as soon as current turns ineligible.
 */
static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	return sched_feat(PREEMPT_SHORT) &&
	       curr->vlag != curr->deadline &&
	       !entity_eligible(cfs_rq, curr);
}
/*
 * Should the waking entity @pse cancel @se's slice protection?
 *
 * Only an eligible waker with a strictly shorter slice qualifies, and
 * then only when it either orders before @se or @se is itself no longer
 * eligible. Evaluation order (and thus side-effect-free short-circuit
 * behaviour) matches the checks it replaces.
 */
static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
				    struct sched_entity *pse, struct sched_entity *se)
{
	if (!sched_feat(PREEMPT_SHORT) || pse->slice >= se->slice)
		return false;

	if (!entity_eligible(cfs_rq, pse))
		return false;

	return entity_before(pse, se) || !entity_eligible(cfs_rq, se);
}
/* /*
* Used by other classes to account runtime. * Used by other classes to account runtime.
*/ */
...@@ -1157,6 +1186,7 @@ static void update_curr(struct cfs_rq *cfs_rq) ...@@ -1157,6 +1186,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct sched_entity *curr = cfs_rq->curr; struct sched_entity *curr = cfs_rq->curr;
struct rq *rq = rq_of(cfs_rq); struct rq *rq = rq_of(cfs_rq);
s64 delta_exec; s64 delta_exec;
bool resched;
if (unlikely(!curr)) if (unlikely(!curr))
return; return;
...@@ -1166,7 +1196,7 @@ static void update_curr(struct cfs_rq *cfs_rq) ...@@ -1166,7 +1196,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
return; return;
curr->vruntime += calc_delta_fair(delta_exec, curr); curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr); resched = update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq); update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) { if (entity_is_task(curr)) {
...@@ -1184,6 +1214,14 @@ static void update_curr(struct cfs_rq *cfs_rq) ...@@ -1184,6 +1214,14 @@ static void update_curr(struct cfs_rq *cfs_rq)
} }
account_cfs_rq_runtime(cfs_rq, delta_exec); account_cfs_rq_runtime(cfs_rq, delta_exec);
if (rq->nr_running == 1)
return;
if (resched || did_preempt_short(cfs_rq, curr)) {
resched_curr(rq);
clear_buddies(cfs_rq, curr);
}
} }
static void update_curr_fair(struct rq *rq) static void update_curr_fair(struct rq *rq)
...@@ -8605,7 +8643,17 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int ...@@ -8605,7 +8643,17 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
cfs_rq = cfs_rq_of(se); cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq); update_curr(cfs_rq);
/* /*
* XXX pick_eevdf(cfs_rq) != se ? * If @p has a shorter slice than current and @p is eligible, override
* current's slice protection in order to allow preemption.
*
* Note that even if @p does not turn out to be the most eligible
* task at this moment, current's slice protection will be lost.
*/
if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline)
se->vlag = se->deadline + 1;
/*
* If @p has become the most eligible task, force preemption.
*/ */
if (pick_eevdf(cfs_rq) == pse) if (pick_eevdf(cfs_rq) == pse)
goto preempt; goto preempt;
......
...@@ -18,6 +18,11 @@ SCHED_FEAT(PLACE_REL_DEADLINE, true) ...@@ -18,6 +18,11 @@ SCHED_FEAT(PLACE_REL_DEADLINE, true)
 * 0-lag point or until it has exhausted its slice. * 0-lag point or until it has exhausted its slice.
*/ */
SCHED_FEAT(RUN_TO_PARITY, true) SCHED_FEAT(RUN_TO_PARITY, true)
/*
* Allow wakeup of tasks with a shorter slice to cancel RESPECT_SLICE for
* current.
*/
SCHED_FEAT(PREEMPT_SHORT, true)
/* /*
* Prefer to schedule the task we woke last (assuming it failed * Prefer to schedule the task we woke last (assuming it failed
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment