Commit c4a2ed72 authored by Jens Axboe

io_uring: only return -EBUSY for submit on non-flushed backlog

We return -EBUSY on submit when we have a CQ ring overflow backlog, but
that can be a bit problematic if the application is using pure userspace
poll of the CQ ring. For that case, if the ring briefly overflowed and
we have pending entries in the backlog, the submit flushes the backlog
successfully but still returns -EBUSY. If we're able to fully flush the
CQ ring backlog, let the submission proceed.
Reported-by: Dan Melnic <dmm@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f9bd67f6
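
To make the scenario concrete, here is a minimal userspace sketch of the pattern the message describes, assuming liburing (the ring size and the elided I/O workload are illustrative, not taken from this commit). The application reaps completions purely in userspace with peek, then submits again; before this change, that submit could fail with -EBUSY even when the kernel's internal flush had just emptied the overflow backlog:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int ret;

	/* Illustrative 8-entry ring; a real application would size this
	 * to its workload. */
	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* ... queue and submit enough I/O that the CQ ring briefly
	 * overflows ... */

	/* Pure userspace poll of the CQ ring: reap whatever is visible
	 * without entering the kernel. */
	while (io_uring_peek_cqe(&ring, &cqe) == 0)
		io_uring_cqe_seen(&ring, cqe);

	/* The overflow backlog is only flushed inside the kernel's
	 * submit path. With this fix, submission proceeds when that
	 * flush fully drains the backlog; only a backlog that cannot
	 * be fully flushed still yields -EBUSY. */
	ret = io_uring_submit(&ring);
	if (ret == -EBUSY)
		fprintf(stderr, "CQ backlog could not be fully flushed\n");

	io_uring_queue_exit(&ring);
	return 0;
}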
fs/io_uring.c
@@ -654,7 +654,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 		eventfd_signal(ctx->cq_ev_fd, 1);
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
+/* Returns true if there are no backlogged entries after the flush */
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 {
 	struct io_rings *rings = ctx->rings;
 	struct io_uring_cqe *cqe;
@@ -664,10 +665,10 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 
 	if (!force) {
 		if (list_empty_careful(&ctx->cq_overflow_list))
-			return;
+			return true;
 		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
 		    rings->cq_ring_entries))
-			return;
+			return false;
 	}
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
@@ -676,6 +677,7 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 	if (force)
 		ctx->cq_overflow_flushed = true;
 
+	cqe = NULL;
 	while (!list_empty(&ctx->cq_overflow_list)) {
 		cqe = io_get_cqring(ctx);
 		if (!cqe && !force)
@@ -703,6 +705,8 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 		list_del(&req->list);
 		io_put_req(req);
 	}
+
+	return cqe != NULL;
 }
 
 static void io_cqring_fill_event(struct io_kiocb *req, long res)
@@ -3144,10 +3148,10 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	int i, submitted = 0;
 	bool mm_fault = false;
 
-	if (!list_empty(&ctx->cq_overflow_list)) {
-		io_cqring_overflow_flush(ctx, false);
+	/* if we have a backlog and couldn't flush it all, return BUSY */
+	if (!list_empty(&ctx->cq_overflow_list) &&
+	    !io_cqring_overflow_flush(ctx, false))
 		return -EBUSY;
-	}
 
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, ctx, nr);
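
Note on the new return value: in io_cqring_overflow_flush(), cqe is initialized to NULL and the loop over cq_overflow_list breaks early only when io_get_cqring() fails in the non-forced case, so the final return cqe != NULL reports true exactly when every backlogged completion was moved into the CQ ring (the already-empty case returns true before the loop). The submit path uses that result to distinguish a fully flushed backlog, where submission proceeds, from one that still holds entries, which keeps returning -EBUSY.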