Commit d224985a authored by Peter Zijlstra, committed by Ingo Molnar

sched/wait, drivers/drm: Convert wait_on_atomic_t() usage to the new wait_var_event() API

The old wait_on_atomic_t() is going to get removed, use the more
flexible wait_var_event() API instead.

Unlike wake_up_atomic_t(), wake_up_var() will issue the wakeup
even if the variable is not 0.

No change in functionality.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6b2bb726
...@@ -177,8 +177,9 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to) ...@@ -177,8 +177,9 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
res = pos - iocb->ki_pos; res = pos - iocb->ki_pos;
iocb->ki_pos = pos; iocb->ki_pos = pos;
atomic_dec(&aux_dev->usecount); if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_atomic_t(&aux_dev->usecount); wake_up_var(&aux_dev->usecount);
return res; return res;
} }
...@@ -218,8 +219,9 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from) ...@@ -218,8 +219,9 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
res = pos - iocb->ki_pos; res = pos - iocb->ki_pos;
iocb->ki_pos = pos; iocb->ki_pos = pos;
atomic_dec(&aux_dev->usecount); if (atomic_dec_and_test(&aux_dev->usecount))
wake_up_atomic_t(&aux_dev->usecount); wake_up_var(&aux_dev->usecount);
return res; return res;
} }
...@@ -277,8 +279,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) ...@@ -277,8 +279,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
mutex_unlock(&aux_idr_mutex); mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount); atomic_dec(&aux_dev->usecount);
wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait, wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));
TASK_UNINTERRUPTIBLE);
minor = aux_dev->index; minor = aux_dev->index;
if (aux_dev->dev) if (aux_dev->dev)
......
...@@ -271,18 +271,13 @@ struct igt_wakeup { ...@@ -271,18 +271,13 @@ struct igt_wakeup {
u32 seqno; u32 seqno;
}; };
static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
{
return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
}
static bool wait_for_ready(struct igt_wakeup *w) static bool wait_for_ready(struct igt_wakeup *w)
{ {
DEFINE_WAIT(ready); DEFINE_WAIT(ready);
set_bit(IDLE, &w->flags); set_bit(IDLE, &w->flags);
if (atomic_dec_and_test(w->done)) if (atomic_dec_and_test(w->done))
wake_up_atomic_t(w->done); wake_up_var(w->done);
if (test_bit(STOP, &w->flags)) if (test_bit(STOP, &w->flags))
goto out; goto out;
...@@ -299,7 +294,7 @@ static bool wait_for_ready(struct igt_wakeup *w) ...@@ -299,7 +294,7 @@ static bool wait_for_ready(struct igt_wakeup *w)
out: out:
clear_bit(IDLE, &w->flags); clear_bit(IDLE, &w->flags);
if (atomic_dec_and_test(w->set)) if (atomic_dec_and_test(w->set))
wake_up_atomic_t(w->set); wake_up_var(w->set);
return !test_bit(STOP, &w->flags); return !test_bit(STOP, &w->flags);
} }
...@@ -342,7 +337,7 @@ static void igt_wake_all_sync(atomic_t *ready, ...@@ -342,7 +337,7 @@ static void igt_wake_all_sync(atomic_t *ready,
atomic_set(ready, 0); atomic_set(ready, 0);
wake_up_all(wq); wake_up_all(wq);
wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE); wait_var_event(set, !atomic_read(set));
atomic_set(ready, count); atomic_set(ready, count);
atomic_set(done, count); atomic_set(done, count);
} }
...@@ -350,7 +345,6 @@ static void igt_wake_all_sync(atomic_t *ready, ...@@ -350,7 +345,6 @@ static void igt_wake_all_sync(atomic_t *ready,
static int igt_wakeup(void *arg) static int igt_wakeup(void *arg)
{ {
I915_RND_STATE(prng); I915_RND_STATE(prng);
const int state = TASK_UNINTERRUPTIBLE;
struct intel_engine_cs *engine = arg; struct intel_engine_cs *engine = arg;
struct igt_wakeup *waiters; struct igt_wakeup *waiters;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
...@@ -418,7 +412,7 @@ static int igt_wakeup(void *arg) ...@@ -418,7 +412,7 @@ static int igt_wakeup(void *arg)
* that they are ready for the next test. We wait until all * that they are ready for the next test. We wait until all
* threads are complete and waiting for us (i.e. not a seqno). * threads are complete and waiting for us (i.e. not a seqno).
*/ */
err = wait_on_atomic_t(&done, wait_atomic_timeout, state); err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
if (err) { if (err) {
pr_err("Timed out waiting for %d remaining waiters\n", pr_err("Timed out waiting for %d remaining waiters\n",
atomic_read(&done)); atomic_read(&done));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment