Commit 6c2450ae authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: allocate memory for overflowed CQEs

Instead of using a request itself for overflowed CQE stashing, allocate a
separate entry. The disadvantage is that the allocation may fail and it
will be accounted as lost (see rings->cq_overflow), so we lose reliability
in case of memory pressure if the application is driving the CQ ring into
overflow. However, it opens a way for multiple CQEs per SQE and
even generating SQE-less CQEs.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: use GFP_ATOMIC | __GFP_ACCOUNT]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 464dca61
...@@ -202,6 +202,11 @@ struct io_mapped_ubuf { ...@@ -202,6 +202,11 @@ struct io_mapped_ubuf {
struct io_ring_ctx; struct io_ring_ctx;
/*
 * Holds a CQE that could not be posted because the CQ ring was full.
 * Entries are queued on ctx->cq_overflow_list (see list_add_tail in
 * __io_cqring_fill_event below) and flushed to the ring — then kfree'd —
 * by __io_cqring_overflow_flush once space is available.
 */
struct io_overflow_cqe {
struct io_uring_cqe cqe;
struct list_head list;
};
struct io_rsrc_put { struct io_rsrc_put {
struct list_head list; struct list_head list;
union { union {
...@@ -1401,41 +1406,33 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) ...@@ -1401,41 +1406,33 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
} }
/* Returns true if there are no backlogged entries after the flush */ /* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
struct task_struct *tsk,
struct files_struct *files)
{ {
struct io_rings *rings = ctx->rings; struct io_rings *rings = ctx->rings;
struct io_kiocb *req, *tmp;
struct io_uring_cqe *cqe;
unsigned long flags; unsigned long flags;
bool all_flushed, posted; bool all_flushed, posted;
LIST_HEAD(list);
if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries) if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
return false; return false;
posted = false; posted = false;
spin_lock_irqsave(&ctx->completion_lock, flags); spin_lock_irqsave(&ctx->completion_lock, flags);
list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { while (!list_empty(&ctx->cq_overflow_list)) {
if (!io_match_task(req, tsk, files)) struct io_uring_cqe *cqe = io_get_cqring(ctx);
continue; struct io_overflow_cqe *ocqe;
cqe = io_get_cqring(ctx);
if (!cqe && !force) if (!cqe && !force)
break; break;
ocqe = list_first_entry(&ctx->cq_overflow_list,
list_move(&req->compl.list, &list); struct io_overflow_cqe, list);
if (cqe) { if (cqe)
WRITE_ONCE(cqe->user_data, req->user_data); memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
WRITE_ONCE(cqe->res, req->result); else
WRITE_ONCE(cqe->flags, req->compl.cflags);
} else {
ctx->cached_cq_overflow++;
WRITE_ONCE(ctx->rings->cq_overflow, WRITE_ONCE(ctx->rings->cq_overflow,
ctx->cached_cq_overflow); ++ctx->cached_cq_overflow);
}
posted = true; posted = true;
list_del(&ocqe->list);
kfree(ocqe);
} }
all_flushed = list_empty(&ctx->cq_overflow_list); all_flushed = list_empty(&ctx->cq_overflow_list);
...@@ -1450,19 +1447,10 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, ...@@ -1450,19 +1447,10 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (posted) if (posted)
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
while (!list_empty(&list)) {
req = list_first_entry(&list, struct io_kiocb, compl.list);
list_del(&req->compl.list);
io_put_req(req);
}
return all_flushed; return all_flushed;
} }
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
struct task_struct *tsk,
struct files_struct *files)
{ {
bool ret = true; bool ret = true;
...@@ -1470,7 +1458,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, ...@@ -1470,7 +1458,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
/* iopoll syncs against uring_lock, not completion_lock */ /* iopoll syncs against uring_lock, not completion_lock */
if (ctx->flags & IORING_SETUP_IOPOLL) if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
ret = __io_cqring_overflow_flush(ctx, force, tsk, files); ret = __io_cqring_overflow_flush(ctx, force);
if (ctx->flags & IORING_SETUP_IOPOLL) if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
} }
...@@ -1531,29 +1519,33 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, ...@@ -1531,29 +1519,33 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
WRITE_ONCE(cqe->user_data, req->user_data); WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, cflags); WRITE_ONCE(cqe->flags, cflags);
} else if (ctx->cq_overflow_flushed || return;
atomic_read(&req->task->io_uring->in_idle)) { }
/* if (!ctx->cq_overflow_flushed &&
* If we're in ring overflow flush mode, or in task cancel mode, !atomic_read(&req->task->io_uring->in_idle)) {
* then we cannot store the request for later flushing, we need struct io_overflow_cqe *ocqe;
* to drop it on the floor.
*/ ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
ctx->cached_cq_overflow++; if (!ocqe)
WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow); goto overflow;
} else {
if (list_empty(&ctx->cq_overflow_list)) { if (list_empty(&ctx->cq_overflow_list)) {
set_bit(0, &ctx->sq_check_overflow); set_bit(0, &ctx->sq_check_overflow);
set_bit(0, &ctx->cq_check_overflow); set_bit(0, &ctx->cq_check_overflow);
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
} }
if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED)) ocqe->cqe.user_data = req->user_data;
io_clean_op(req); ocqe->cqe.res = res;
ocqe->cqe.flags = cflags;
req->result = res; list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
req->compl.cflags = cflags; return;
req_ref_get(req);
list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
} }
overflow:
/*
* If we're in ring overflow flush mode, or in task cancel mode,
* or cannot allocate an overflow entry, then we need to drop it
* on the floor.
*/
WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
} }
static void io_cqring_fill_event(struct io_kiocb *req, long res) static void io_cqring_fill_event(struct io_kiocb *req, long res)
...@@ -2398,7 +2390,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) ...@@ -2398,7 +2390,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
* already triggered a CQE (eg in error). * already triggered a CQE (eg in error).
*/ */
if (test_bit(0, &ctx->cq_check_overflow)) if (test_bit(0, &ctx->cq_check_overflow))
__io_cqring_overflow_flush(ctx, false, NULL, NULL); __io_cqring_overflow_flush(ctx, false);
if (io_cqring_events(ctx)) if (io_cqring_events(ctx))
break; break;
...@@ -6581,7 +6573,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) ...@@ -6581,7 +6573,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
/* if we have a backlog and couldn't flush it all, return BUSY */ /* if we have a backlog and couldn't flush it all, return BUSY */
if (test_bit(0, &ctx->sq_check_overflow)) { if (test_bit(0, &ctx->sq_check_overflow)) {
if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL)) if (!__io_cqring_overflow_flush(ctx, false))
return -EBUSY; return -EBUSY;
} }
...@@ -6881,7 +6873,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -6881,7 +6873,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
int ret; int ret;
do { do {
io_cqring_overflow_flush(ctx, false, NULL, NULL); io_cqring_overflow_flush(ctx, false);
if (io_cqring_events(ctx) >= min_events) if (io_cqring_events(ctx) >= min_events)
return 0; return 0;
if (!io_run_task_work()) if (!io_run_task_work())
...@@ -6913,7 +6905,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -6913,7 +6905,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
trace_io_uring_cqring_wait(ctx, min_events); trace_io_uring_cqring_wait(ctx, min_events);
do { do {
/* if we can't even flush overflow, don't wait for more */ /* if we can't even flush overflow, don't wait for more */
if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) { if (!io_cqring_overflow_flush(ctx, false)) {
ret = -EBUSY; ret = -EBUSY;
break; break;
} }
...@@ -8616,7 +8608,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) ...@@ -8616,7 +8608,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
/* if force is set, the ring is going away. always drop after that */ /* if force is set, the ring is going away. always drop after that */
ctx->cq_overflow_flushed = 1; ctx->cq_overflow_flushed = 1;
if (ctx->rings) if (ctx->rings)
__io_cqring_overflow_flush(ctx, true, NULL, NULL); __io_cqring_overflow_flush(ctx, true);
xa_for_each(&ctx->personalities, index, creds) xa_for_each(&ctx->personalities, index, creds)
io_unregister_personality(ctx, index); io_unregister_personality(ctx, index);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
...@@ -8766,7 +8758,6 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, ...@@ -8766,7 +8758,6 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
ret |= io_kill_timeouts(ctx, task, files); ret |= io_kill_timeouts(ctx, task, files);
ret |= io_run_task_work(); ret |= io_run_task_work();
ret |= io_run_ctx_fallback(ctx); ret |= io_run_ctx_fallback(ctx);
io_cqring_overflow_flush(ctx, true, task, files);
if (!ret) if (!ret)
break; break;
cond_resched(); cond_resched();
...@@ -9185,7 +9176,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ...@@ -9185,7 +9176,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/ */
ret = 0; ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) { if (ctx->flags & IORING_SETUP_SQPOLL) {
io_cqring_overflow_flush(ctx, false, NULL, NULL); io_cqring_overflow_flush(ctx, false);
ret = -EOWNERDEAD; ret = -EOWNERDEAD;
if (unlikely(ctx->sq_data->thread == NULL)) { if (unlikely(ctx->sq_data->thread == NULL)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment