Commit af5c72b1 authored by Al Viro

Fix aio_poll() races

aio_poll() has to cope with several unpleasant problems:
	* requests that might stay around indefinitely need to
be made visible for io_cancel(2); that must not be done to
a request already completed, though.
	* in cases when ->poll() has placed us on a waitqueue,
wakeup might have happened (and request completed) before ->poll()
returns.
	* worse, in some early wakeup cases request might end
up re-added into the queue later - we can't treat "woken up and
currently not in the queue" as "it's not going to stick around
indefinitely"
	* ... moreover, ->poll() might have decided not to
put it on any queues to start with, and that needs to be distinguished
from the previous case
	* ->poll() might have tried to put us on more than one queue.
Only the first will succeed for aio poll, so we might end up missing
wakeups.  OTOH, we might very well notice that only after the
wakeup hits and request gets completed (all before ->poll() gets
around to the second poll_wait()).  In that case it's too late to
decide that we have an error.

req->woken was an attempt to deal with that.  Unfortunately, it was
broken.  What we need to keep track of is not that wakeup has happened -
the thing might come back after that.  It's that async reference is
already gone and won't come back, so we can't (and needn't) put the
request on the list of cancellables.

The easiest case is "request hadn't been put on any waitqueues"; we
can tell by seeing NULL apt.head, and in that case there won't be
anything async.  We should either complete the request ourselves
(if vfs_poll() reports anything of interest) or return an error.

In all other cases we get exclusion with wakeups by grabbing the
queue lock.

If request is currently on queue and we have something interesting
from vfs_poll(), we can steal it and complete the request ourselves.

If it's on queue and vfs_poll() has not reported anything interesting,
we either put it on the cancellable list, or, if we know that it
hadn't been put on all queues ->poll() wanted it on, we steal it and
return an error.

If it's _not_ on queue, it's either been already dealt with (in which
case we do nothing), or there's aio_poll_complete_work() about to be
executed.  In that case we either put it on the cancellable list,
or, if we know it hadn't been put on all queues ->poll() wanted it on,
simulate what cancel would've done.

It's a lot more convoluted than I'd like it to be.  Single-consumer APIs
suck, and unfortunately aio is not an exception...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 2bb874c0
...@@ -181,7 +181,7 @@ struct poll_iocb { ...@@ -181,7 +181,7 @@ struct poll_iocb {
struct file *file; struct file *file;
struct wait_queue_head *head; struct wait_queue_head *head;
__poll_t events; __poll_t events;
bool woken; bool done;
bool cancelled; bool cancelled;
struct wait_queue_entry wait; struct wait_queue_entry wait;
struct work_struct work; struct work_struct work;
...@@ -1606,12 +1606,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, ...@@ -1606,12 +1606,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
return 0; return 0;
} }
static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
{
iocb->ki_res.res = mangle_poll(mask);
iocb_put(iocb);
}
static void aio_poll_complete_work(struct work_struct *work) static void aio_poll_complete_work(struct work_struct *work)
{ {
struct poll_iocb *req = container_of(work, struct poll_iocb, work); struct poll_iocb *req = container_of(work, struct poll_iocb, work);
...@@ -1637,9 +1631,11 @@ static void aio_poll_complete_work(struct work_struct *work) ...@@ -1637,9 +1631,11 @@ static void aio_poll_complete_work(struct work_struct *work)
return; return;
} }
list_del_init(&iocb->ki_list); list_del_init(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
req->done = true;
spin_unlock_irq(&ctx->ctx_lock); spin_unlock_irq(&ctx->ctx_lock);
aio_poll_complete(iocb, mask); iocb_put(iocb);
} }
/* assumes we are called with irqs disabled */ /* assumes we are called with irqs disabled */
...@@ -1667,31 +1663,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, ...@@ -1667,31 +1663,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
__poll_t mask = key_to_poll(key); __poll_t mask = key_to_poll(key);
unsigned long flags; unsigned long flags;
req->woken = true;
/* for instances that support it check for an event match first: */ /* for instances that support it check for an event match first: */
if (mask) { if (mask && !(mask & req->events))
if (!(mask & req->events))
return 0; return 0;
list_del_init(&req->wait.entry);
if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
/* /*
* Try to complete the iocb inline if we can. Use * Try to complete the iocb inline if we can. Use
* irqsave/irqrestore because not all filesystems (e.g. fuse) * irqsave/irqrestore because not all filesystems (e.g. fuse)
* call this function with IRQs disabled and because IRQs * call this function with IRQs disabled and because IRQs
* have to be disabled before ctx_lock is obtained. * have to be disabled before ctx_lock is obtained.
*/ */
if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
list_del(&iocb->ki_list); list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
req->done = true;
spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
iocb_put(iocb);
list_del_init(&req->wait.entry); } else {
aio_poll_complete(iocb, mask);
return 1;
}
}
list_del_init(&req->wait.entry);
schedule_work(&req->work); schedule_work(&req->work);
}
return 1; return 1;
} }
...@@ -1723,6 +1715,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) ...@@ -1723,6 +1715,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
struct kioctx *ctx = aiocb->ki_ctx; struct kioctx *ctx = aiocb->ki_ctx;
struct poll_iocb *req = &aiocb->poll; struct poll_iocb *req = &aiocb->poll;
struct aio_poll_table apt; struct aio_poll_table apt;
bool cancel = false;
__poll_t mask; __poll_t mask;
/* reject any unknown events outside the normal event mask. */ /* reject any unknown events outside the normal event mask. */
...@@ -1736,7 +1729,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) ...@@ -1736,7 +1729,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->head = NULL; req->head = NULL;
req->woken = false; req->done = false;
req->cancelled = false; req->cancelled = false;
apt.pt._qproc = aio_poll_queue_proc; apt.pt._qproc = aio_poll_queue_proc;
...@@ -1749,36 +1742,33 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) ...@@ -1749,36 +1742,33 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
init_waitqueue_func_entry(&req->wait, aio_poll_wake); init_waitqueue_func_entry(&req->wait, aio_poll_wake);
mask = vfs_poll(req->file, &apt.pt) & req->events; mask = vfs_poll(req->file, &apt.pt) & req->events;
if (unlikely(!req->head)) {
/* we did not manage to set up a waitqueue, done */
goto out;
}
spin_lock_irq(&ctx->ctx_lock); spin_lock_irq(&ctx->ctx_lock);
if (likely(req->head)) {
spin_lock(&req->head->lock); spin_lock(&req->head->lock);
if (req->woken) { if (unlikely(list_empty(&req->wait.entry))) {
/* wake_up context handles the rest */ if (apt.error)
mask = 0; cancel = true;
apt.error = 0; apt.error = 0;
} else if (mask || apt.error) { mask = 0;
/* if we get an error or a mask we are done */ }
WARN_ON_ONCE(list_empty(&req->wait.entry)); if (mask || apt.error) {
list_del_init(&req->wait.entry); list_del_init(&req->wait.entry);
} else { } else if (cancel) {
/* actually waiting for an event */ WRITE_ONCE(req->cancelled, true);
} else if (!req->done) { /* actually waiting for an event */
list_add_tail(&aiocb->ki_list, &ctx->active_reqs); list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
aiocb->ki_cancel = aio_poll_cancel; aiocb->ki_cancel = aio_poll_cancel;
} }
spin_unlock(&req->head->lock); spin_unlock(&req->head->lock);
}
if (mask) { /* no async, we'd stolen it */
aiocb->ki_res.res = mangle_poll(mask);
apt.error = 0;
}
spin_unlock_irq(&ctx->ctx_lock); spin_unlock_irq(&ctx->ctx_lock);
out:
if (unlikely(apt.error))
return apt.error;
if (mask) if (mask)
aio_poll_complete(aiocb, mask); iocb_put(aiocb);
return 0; return apt.error;
} }
static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment