Commit 75d7b3ae authored by Pavel Begunkov, committed by Jens Axboe

io_uring: kill REQ_F_COMPLETE_INLINE

REQ_F_COMPLETE_INLINE is only needed to delay queueing into the
completion list to io_queue_sqe() as __io_req_complete() is inlined and
we don't want to bloat the kernel.

As now we complete in a more centralised fashion in io_issue_sqe() we
can get rid of the flag and queue to the list directly.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/600ba20a9338b8a39b249b23d3d177803613dde4.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent df9830d8
@@ -742,10 +742,7 @@ void io_req_complete_post(struct io_kiocb *req)
 inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 {
-	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
-		req->flags |= REQ_F_COMPLETE_INLINE;
-	else
-		io_req_complete_post(req);
+	io_req_complete_post(req);
 }
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
@@ -1581,9 +1578,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	if (creds)
 		revert_creds(creds);
 
-	if (ret == IOU_OK)
-		__io_req_complete(req, issue_flags);
-	else if (ret != IOU_ISSUE_SKIP_COMPLETE)
+	if (ret == IOU_OK) {
+		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+			io_req_add_compl_list(req);
+		else
+			io_req_complete_post(req);
+	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
 		return ret;
 
 	/* If the op doesn't have a file, we're not polling for it */
@@ -1749,10 +1749,6 @@ static inline void io_queue_sqe(struct io_kiocb *req)
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
-	if (req->flags & REQ_F_COMPLETE_INLINE) {
-		io_req_add_compl_list(req);
-		return;
-	}
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
......
@@ -217,11 +217,6 @@ static inline bool io_run_task_work(void)
 	return false;
 }
 
-static inline void io_req_complete_state(struct io_kiocb *req)
-{
-	req->flags |= REQ_F_COMPLETE_INLINE;
-}
-
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
......
@@ -301,7 +301,6 @@ enum {
 	REQ_F_POLLED_BIT,
 	REQ_F_BUFFER_SELECTED_BIT,
 	REQ_F_BUFFER_RING_BIT,
-	REQ_F_COMPLETE_INLINE_BIT,
 	REQ_F_REISSUE_BIT,
 	REQ_F_CREDS_BIT,
 	REQ_F_REFCOUNT_BIT,
@@ -356,8 +355,6 @@ enum {
 	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
 	/* buffer selected from ring, needs commit */
 	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
-	/* completion is deferred through io_comp_state */
-	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
 	/* caller should reissue async */
 	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
 	/* supports async reads/writes */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment