Commit 8c2090c5 authored by Tejun Heo

sched_ext: Initialize in bypass mode

scx_ops_enable() used preempt_disable() around the task iteration loop to
switch tasks into SCX to guarantee forward progress of the task which is
running scx_ops_enable(). However, in the gap between setting
__scx_ops_enabled and preempt_disable(), an external entity can put tasks
including the enabling one into SCX prematurely, which can lead to
malfunctions including stalls.

The bypass mode can wrap the entire enabling operation and guarantee forward
progress no matter what the BPF scheduler does. Use it instead of
preempt_disable() while enabling.
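
Condensed from the diff below (all names and ordering are taken from the
patch itself; the code in between is elided), the enable path is now
bracketed by bypass mode on both the success and the error-unwind paths:

	scx_ops_bypass(true);	/* BPF scheduler can no longer stall anyone */

	/* ... prep all tasks and switch everyone to SCX ... */

	scx_ops_bypass(false);	/* success path leaves bypass mode */
	...
err_disable_unlock_all:
	...
	scx_ops_bypass(false);	/* error unwind leaves bypass mode too */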

While at it, release and regrab scx_tasks_lock between the two task
iteration loops in scx_ops_enable() for clarity, as there is no reason to
keep holding the lock between them.
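
For reference, the resulting shape of the two iteration passes, condensed
from the diff below with the loop bodies elided:

	spin_lock_irq(&scx_tasks_lock);
	scx_task_iter_init(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		/* first pass: prep each task for SCX */
	}
	scx_task_iter_exit(&sti);
	spin_unlock_irq(&scx_tasks_lock);	/* released between the passes */

	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));

	spin_lock_irq(&scx_tasks_lock);		/* regrabbed for the switch pass */
	scx_task_iter_init(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		/* second pass: PREPPED -> ENABLED, switch sched class */
	}
	scx_task_iter_exit(&sti);
	spin_unlock_irq(&scx_tasks_lock);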
Signed-off-by: Tejun Heo <tj@kernel.org>
parent fc1fcebe
@@ -5075,6 +5075,14 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
 			   scx_watchdog_timeout / 2);
 
+	/*
+	 * Once __scx_ops_enabled is set, %current can be switched to SCX
+	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
+	 * userspace scheduling) may not function correctly before all tasks are
+	 * switched. Init in bypass mode to guarantee forward progress.
+	 */
+	scx_ops_bypass(true);
+
 	/*
 	 * Lock out forks, cgroup on/offlining and moves before opening the
 	 * floodgate so that they don't wander into the operations prematurely.
@@ -5134,7 +5142,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	 * disabled.
 	 */
 	spin_lock_irq(&scx_tasks_lock);
-
 	scx_task_iter_init(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		/*
@@ -5163,22 +5170,19 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 			spin_lock_irq(&scx_tasks_lock);
 	}
 	scx_task_iter_exit(&sti);
+	spin_unlock_irq(&scx_tasks_lock);
 
 	/*
-	 * All tasks are prepped but are still ops-disabled. Ensure that
-	 * %current can't be scheduled out and switch everyone.
-	 * preempt_disable() is necessary because we can't guarantee that
-	 * %current won't be starved if scheduled out while switching.
+	 * All tasks are prepped but the tasks are not enabled. Switch everyone.
 	 */
-	preempt_disable();
+	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
 
 	/*
 	 * We're fully committed and can't fail. The PREPPED -> ENABLED
 	 * transitions here are synchronized against sched_ext_free() through
 	 * scx_tasks_lock.
 	 */
-	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
-
+	spin_lock_irq(&scx_tasks_lock);
 	scx_task_iter_init(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		const struct sched_class *old_class = p->sched_class;
@@ -5195,12 +5199,12 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		check_class_changed(task_rq(p), p, old_class, p->prio);
 	}
 	scx_task_iter_exit(&sti);
-
 	spin_unlock_irq(&scx_tasks_lock);
-	preempt_enable();
+
 	scx_cgroup_unlock();
 	cpus_read_unlock();
 	percpu_up_write(&scx_fork_rwsem);
+	scx_ops_bypass(false);
 
 	/*
 	 * Returning an error code here would lose the recorded error
@@ -5241,6 +5245,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 err_disable_unlock_all:
 	scx_cgroup_unlock();
 	percpu_up_write(&scx_fork_rwsem);
+	scx_ops_bypass(false);
 err_disable_unlock_cpus:
 	cpus_read_unlock();
 err_disable: