Commit 5204aa8c authored by Pavel Begunkov, committed by Jens Axboe

io_uring: add a helper for apoll alloc

Extract a helper function for apoll allocation; it makes the code easier to read.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2f93282b47dd678e805dd0d7097f66968ced495c.1655990418.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 13a99017
@@ -508,10 +508,33 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
 }
 
+static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
+					     unsigned issue_flags)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct async_poll *apoll;
+
+	if (req->flags & REQ_F_POLLED) {
+		apoll = req->apoll;
+		kfree(apoll->double_poll);
+	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+		   !list_empty(&ctx->apoll_cache)) {
+		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
+					 poll.wait.entry);
+		list_del_init(&apoll->poll.wait.entry);
+	} else {
+		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+		if (unlikely(!apoll))
+			return NULL;
+	}
+	apoll->double_poll = NULL;
+	req->apoll = apoll;
+	return apoll;
+}
+
 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
-	struct io_ring_ctx *ctx = req->ctx;
 	struct async_poll *apoll;
 	struct io_poll_table ipt;
 	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
@@ -546,21 +569,10 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	}
 	if (def->poll_exclusive)
 		mask |= EPOLLEXCLUSIVE;
-	if (req->flags & REQ_F_POLLED) {
-		apoll = req->apoll;
-		kfree(apoll->double_poll);
-	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   !list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-					 poll.wait.entry);
-		list_del_init(&apoll->poll.wait.entry);
-	} else {
-		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
-		if (unlikely(!apoll))
-			return IO_APOLL_ABORTED;
-	}
-	apoll->double_poll = NULL;
-	req->apoll = apoll;
+
+	apoll = io_req_alloc_apoll(req, issue_flags);
+	if (!apoll)
+		return IO_APOLL_ABORTED;
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
...
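For readers outside the kernel tree, the sketch below illustrates the three-tier allocation strategy the new helper encodes: reuse the request's previous entry when it is being re-armed, otherwise recycle one from a per-context cache, otherwise fall back to a fresh heap allocation. This is a minimal userspace analogue; the types and the alloc_apoll() name are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel structures (hypothetical, not the
 * kernel's types). The kernel links cached entries via poll.wait.entry;
 * here a plain singly-linked free list plays that role. */
struct apoll {
	struct apoll *next;		/* free-list link */
};

struct ctx {
	struct apoll *cache;		/* per-context list of recycled entries */
};

struct request {
	struct ctx *ctx;
	struct apoll *apoll;
	bool polled;			/* stands in for REQ_F_POLLED */
};

/* Mirrors the shape of io_req_alloc_apoll(): reuse in place when the
 * request was already polled, else pop from the context cache (only safe
 * when the caller holds the lock), else allocate fresh. */
static struct apoll *alloc_apoll(struct request *req, bool locked)
{
	struct ctx *ctx = req->ctx;
	struct apoll *apoll;

	if (req->polled) {
		apoll = req->apoll;		/* re-arming: reuse the old entry */
	} else if (locked && ctx->cache) {
		apoll = ctx->cache;		/* fast path: recycle a cached entry */
		ctx->cache = apoll->next;
	} else {
		apoll = malloc(sizeof(*apoll));	/* slow path: fresh allocation */
		if (!apoll)
			return NULL;
	}
	req->apoll = apoll;
	return apoll;
}

int main(void)
{
	struct ctx ctx = { .cache = NULL };
	struct request req = { .ctx = &ctx };

	struct apoll *a = alloc_apoll(&req, true);	/* cache empty: malloc path */
	printf("allocated: %p\n", (void *)a);
	free(a);
	return 0;
}

The design point of the refactor is visible in the second hunk: io_arm_poll_handler() no longer needs its own io_ring_ctx local or the nested branching, and the helper's single NULL return maps cleanly onto the caller's IO_APOLL_ABORTED error path.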