Commit f9fc8cad authored by Peter Zijlstra

sched: Add TASK_ANY for wait_task_inactive()

Now that wait_task_inactive()'s @match_state argument is a mask (like
ttwu()), it is possible to replace the special !match_state case with
an 'all-states' value such that any blocked state will match.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YxhkzfuFTvRnpUaH@hirez.programming.kicks-ass.net
parent 9204a97f
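The 'all-states' value works because the individual task states are one-hot bits below TASK_STATE_MAX, so TASK_STATE_MAX-1 has every state bit set, while TASK_RUNNING is 0 and therefore never intersects the mask. A minimal standalone sketch of that arithmetic (state constants copied from include/linux/sched.h; the demo program itself is illustrative, not kernel code):

#include <assert.h>

/* State bits as defined in include/linux/sched.h. */
#define TASK_RUNNING		0x0000
#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002
#define TASK_RTLOCK_WAIT	0x1000
#define TASK_STATE_MAX		0x2000

/* The new value: every state bit below TASK_STATE_MAX set, i.e. 0x1fff. */
#define TASK_ANY		(TASK_STATE_MAX-1)

int main(void)
{
	/* Every blocked state intersects TASK_ANY ... */
	assert(TASK_INTERRUPTIBLE & TASK_ANY);
	assert(TASK_UNINTERRUPTIBLE & TASK_ANY);
	assert(TASK_RTLOCK_WAIT & TASK_ANY);

	/* ... while TASK_RUNNING (0) never does, so a woken task still
	 * fails the "state & match_state" test, exactly as the old
	 * !match_state special case required. */
	assert(!(TASK_RUNNING & TASK_ANY));
	return 0;
}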
drivers/powercap/idle_inject.c
@@ -254,7 +254,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
 		iit = per_cpu_ptr(&idle_inject_thread, cpu);
 		iit->should_run = 0;
-		wait_task_inactive(iit->tsk, 0);
+		wait_task_inactive(iit->tsk, TASK_ANY);
 	}
 	cpu_hotplug_enable();
fs/coredump.c
@@ -412,7 +412,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 		 */
 		ptr = core_state->dumper.next;
 		while (ptr != NULL) {
-			wait_task_inactive(ptr->task, 0);
+			wait_task_inactive(ptr->task, TASK_ANY);
 			ptr = ptr->next;
 		}
 	}
include/linux/sched.h
@@ -101,6 +101,8 @@ struct task_group;
 #define TASK_RTLOCK_WAIT		0x1000
 #define TASK_STATE_MAX			0x2000
 
+#define TASK_ANY			(TASK_STATE_MAX-1)
+
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
kernel/sched/core.c
@@ -3253,12 +3253,12 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
- * If @match_state is nonzero, it's the @p->state value just checked and
- * not expected to change. If it changes, i.e. @p might have woken up,
- * then return zero. When we succeed in waiting for @p to be off its CPU,
- * we return a positive number (its total switch count). If a second call
- * a short while later returns the same number, the caller can be sure that
- * @p has remained unscheduled the whole time.
+ * Wait for the thread to block in any of the states set in @match_state.
+ * If it changes, i.e. @p might have woken up, then return zero. When we
+ * succeed in waiting for @p to be off its CPU, we return a positive number
+ * (its total switch count). If a second call a short while later returns the
+ * same number, the caller can be sure that @p has remained unscheduled the
+ * whole time.
  *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
@@ -3294,7 +3294,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
 		 * is actually now running somewhere else!
 		 */
 		while (task_on_cpu(rq, p)) {
-			if (match_state && !(READ_ONCE(p->__state) & match_state))
+			if (!(READ_ONCE(p->__state) & match_state))
 				return 0;
 			cpu_relax();
 		}
@@ -3309,7 +3309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
 		running = task_on_cpu(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
-		if (!match_state || (READ_ONCE(p->__state) & match_state))
+		if (READ_ONCE(p->__state) & match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
 		task_rq_unlock(rq, p, &rf);
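The double-read protocol described in the comment relies on the MSB trick in the last hunk: ncsw is the voluntary context-switch count with LONG_MIN OR-ed in, so a successful match returns a nonzero value even when nvcsw is 0. A hedged sketch of the caller-side pattern the comment describes (do_something_slow() is a hypothetical stand-in for the caller's own work between the two reads):

	unsigned long ncsw1, ncsw2;

	ncsw1 = wait_task_inactive(p, TASK_ANY);	/* 0 => @p was not blocked */
	do_something_slow();				/* hypothetical caller work */
	ncsw2 = wait_task_inactive(p, TASK_ANY);
	if (ncsw1 && ncsw1 == ncsw2) {
		/* switch count unchanged: @p stayed off the CPU throughout */
	}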