Commit 00b0db56 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: open code io_fill_cqe_req()

io_fill_cqe_req() is only called from one place; open code it there, and
rename __io_fill_cqe_req() to io_fill_cqe_req().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f432ce75bb1c94cadf0bd2add4d6aa510bd1fb36.1691757663.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b2e74db5
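The shape of the change: before this patch, io_fill_cqe_req() was a thin wrapper that tried __io_fill_cqe_req() and fell back to io_req_cqe_overflow() on a full CQ ring. Only __io_req_complete_post() still used the wrapper, so the patch deletes it, gives the inner helper the shorter name, and has the one remaining caller open-code the overflow fallback, matching what the other call sites already did. Below is a minimal C sketch of that pattern; the names (post_cqe, handle_overflow, post_cqe_or_overflow, complete_post) are hypothetical stand-ins, not the real io_uring functions or types.

	#include <stdbool.h>

	struct ctx;	/* stand-in for struct io_ring_ctx */
	struct req;	/* stand-in for struct io_kiocb */

	bool post_cqe(struct ctx *c, struct req *r);	/* plays __io_fill_cqe_req() */
	bool handle_overflow(struct req *r);		/* plays io_req_cqe_overflow() */

	/* Before: a wrapper hid the overflow fallback from its single caller. */
	static inline bool post_cqe_or_overflow(struct ctx *c, struct req *r)
	{
		if (post_cqe(c, r))
			return true;
		return handle_overflow(r);
	}

	/* After: the wrapper is gone; the sole caller open-codes the fallback,
	 * so every call site now handles the false return explicitly.
	 */
	static void complete_post(struct ctx *c, struct req *r)
	{
		if (!post_cqe(c, r))
			handle_overflow(r);
	}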
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -978,8 +978,10 @@ static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 	struct io_rsrc_node *rsrc_node = NULL;
 
 	io_cq_lock(ctx);
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		io_fill_cqe_req(ctx, req);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		if (!io_fill_cqe_req(ctx, req))
+			io_req_cqe_overflow(req);
+	}
 
 	/*
 	 * If we're the last reference to this request, add to our locked
@@ -1556,7 +1558,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 					    comp_list);
 
 		if (!(req->flags & REQ_F_CQE_SKIP) &&
-		    unlikely(!__io_fill_cqe_req(ctx, req))) {
+		    unlikely(!io_fill_cqe_req(ctx, req))) {
 			if (ctx->task_complete) {
 				spin_lock(&ctx->completion_lock);
 				io_req_cqe_overflow(req);
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -133,8 +133,7 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 	return io_get_cqe_overflow(ctx, false);
 }
 
-static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
-				     struct io_kiocb *req)
+static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 
@@ -168,14 +167,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 	return true;
 }
 
-static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
-				   struct io_kiocb *req)
-{
-	if (likely(__io_fill_cqe_req(ctx, req)))
-		return true;
-	return io_req_cqe_overflow(req);
-}
-
 static inline void req_set_fail(struct io_kiocb *req)
 {
 	req->flags |= REQ_F_FAIL;
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1064,7 +1064,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 			continue;
 
 		req->cqe.flags = io_put_kbuf(req, 0);
-		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
+		if (unlikely(!io_fill_cqe_req(ctx, req))) {
 			spin_lock(&ctx->completion_lock);
 			io_req_cqe_overflow(req);
 			spin_unlock(&ctx->completion_lock);