Commit 53ccf69b authored by Pavel Begunkov, committed by Jens Axboe

io_uring: don't inline io_put_kbuf

io_put_kbuf() is huge, don't bloat the kernel with inlining.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2e21ccf0be471ffa654032914b9430813cae53f8.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7012c815
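
The change applies the standard C un-inlining pattern: the header keeps only a thin static inline wrapper for the cheap fast-path check, while the large slow path moves out of line into the .c file so its body is emitted once instead of at every call site. Below is a minimal sketch of that pattern; all names in it (foo_put, __foo_put, FOO_F_SELECTED) are hypothetical and only illustrate the shape of the io_put_kbuf()/__io_put_kbuf() split shown in the diff that follows.

/* foo.h: only the cheap fast-path test stays inline */
struct foo_req {
	unsigned int flags;
};
#define FOO_F_SELECTED	1u

unsigned int __foo_put(struct foo_req *req, unsigned int flags);	/* defined out of line */

static inline unsigned int foo_put(struct foo_req *req, unsigned int flags)
{
	if (!(req->flags & FOO_F_SELECTED))	/* common case: nothing to release */
		return 0;
	return __foo_put(req, flags);		/* rare case: one call into foo.c */
}

/* foo.c: the heavyweight body is compiled exactly once instead of being
 * duplicated into every caller the way a large static inline would be. */
unsigned int __foo_put(struct foo_req *req, unsigned int flags)
{
	(void)flags;
	/* locking and list manipulation would live here */
	req->flags &= ~FOO_F_SELECTED;
	return 1u;	/* e.g. flag bits describing the released buffer */
}
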
@@ -83,6 +83,39 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
+unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
+{
+	unsigned int cflags;
+
+	/*
+	 * We can add this buffer back to two lists:
+	 *
+	 * 1) The io_buffers_cache list. This one is protected by the
+	 *    ctx->uring_lock. If we already hold this lock, add back to this
+	 *    list as we can grab it from issue as well.
+	 * 2) The io_buffers_comp list. This one is protected by the
+	 *    ctx->completion_lock.
+	 *
+	 * We migrate buffers from the comp_list to the issue cache list
+	 * when we need one.
+	 */
+	if (req->flags & REQ_F_BUFFER_RING) {
+		/* no buffers to recycle for this case */
+		cflags = __io_put_kbuf_list(req, NULL);
+	} else if (issue_flags & IO_URING_F_UNLOCKED) {
+		struct io_ring_ctx *ctx = req->ctx;
+
+		spin_lock(&ctx->completion_lock);
+		cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		lockdep_assert_held(&req->ctx->uring_lock);
+
+		cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
+	}
+	return cflags;
+}
+
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
 					      struct io_buffer_list *bl)
 {
...
@@ -47,6 +47,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 
+unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -79,7 +81,8 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 		__io_kbuf_recycle(req, issue_flags);
 }
 
-static unsigned int __io_put_kbuf(struct io_kiocb *req, struct list_head *list)
+static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
+					      struct list_head *list)
 {
 	if (req->flags & REQ_F_BUFFER_RING) {
 		if (req->buf_list)
@@ -99,44 +102,15 @@ static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return 0;
-	return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
+	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
 }
 
 static inline unsigned int io_put_kbuf(struct io_kiocb *req,
 				       unsigned issue_flags)
 {
-	unsigned int cflags;
-
 	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
 		return 0;
-
-	/*
-	 * We can add this buffer back to two lists:
-	 *
-	 * 1) The io_buffers_cache list. This one is protected by the
-	 *    ctx->uring_lock. If we already hold this lock, add back to this
-	 *    list as we can grab it from issue as well.
-	 * 2) The io_buffers_comp list. This one is protected by the
-	 *    ctx->completion_lock.
-	 *
-	 * We migrate buffers from the comp_list to the issue cache list
-	 * when we need one.
-	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		/* no buffers to recycle for this case */
-		cflags = __io_put_kbuf(req, NULL);
-	} else if (issue_flags & IO_URING_F_UNLOCKED) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		spin_lock(&ctx->completion_lock);
-		cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
-		spin_unlock(&ctx->completion_lock);
-	} else {
-		lockdep_assert_held(&req->ctx->uring_lock);
-
-		cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
-	}
-	return cflags;
+	return __io_put_kbuf(req, issue_flags);
 }
 
 #endif