Commit 1962da0d authored by Christoph Hellwig

aio: try to complete poll iocbs without context switch

If we can acquire ctx_lock without spinning we can just remove our
iocb from the active_reqs list, and thus complete the iocbs from the
wakeup context.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 2c14fa83
...@@ -1633,6 +1633,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, ...@@ -1633,6 +1633,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key) void *key)
{ {
struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
struct file *file = req->file; struct file *file = req->file;
__poll_t mask = key_to_poll(key); __poll_t mask = key_to_poll(key);
...@@ -1648,9 +1649,22 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, ...@@ -1648,9 +1649,22 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
__aio_poll_remove(req); __aio_poll_remove(req);
/*
* Try completing without a context switch if we can acquire ctx_lock
* without spinning. Otherwise we need to defer to a workqueue to
* avoid a deadlock due to the lock order.
*/
if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
list_del_init(&iocb->ki_list);
spin_unlock(&iocb->ki_ctx->ctx_lock);
__aio_poll_complete(req, mask);
} else {
req->events = mask; req->events = mask;
INIT_WORK(&req->work, aio_poll_work); INIT_WORK(&req->work, aio_poll_work);
schedule_work(&req->work); schedule_work(&req->work);
}
return 1; return 1;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment