Commit 795bbbc8 authored by Hao Xu's avatar Hao Xu Committed by Jens Axboe

io_uring: kbuf: inline io_kbuf_recycle_ring()

Make io_kbuf_recycle_ring() inline since it is the fast path of
provided buffer.
Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220623130126.179232-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 49f1c68e
......@@ -74,34 +74,6 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
return;
}
/*
 * Recycle a ring-provided buffer back to its buffer ring.
 *
 * @req: request that may still hold a reference to a provided-buffer ring
 *
 * Called when a request did not consume (or only partially consumed) a
 * buffer picked from a provided buffer ring.
 */
void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, that case we should increment bl->head
	 * to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
	/* redundant trailing `return;` removed — function is void */
}
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
unsigned int cflags;
......
......@@ -49,7 +49,33 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_recycle_ring(struct io_kiocb *req);
/*
 * Recycle a ring-provided buffer back to its buffer ring.
 *
 * @req: request that may still hold a reference to a provided-buffer ring
 *
 * Inline because this sits on the provided-buffer fast path.
 */
static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/* Nothing retrieved from a ring, nothing to hand back. */
	if (!req->buf_list)
		return;

	if (req->flags & REQ_F_PARTIAL_IO) {
		/*
		 * Partial IO: bump bl->head so this request keeps sole
		 * ownership of the buffer. Reaching here means the
		 * io_uring_lock has been held since the buffer was
		 * retrieved; in the io-wq case req->buf_list was already
		 * cleared at retrieval time, so it cannot be set here.
		 */
		req->buf_list->head++;
		req->buf_list = NULL;
	} else {
		/*
		 * Untouched buffer: simply drop REQ_F_BUFFER_RING so
		 * bl->head is never incremented. Restore the group id so
		 * a retry can select from the same ring again.
		 */
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~REQ_F_BUFFER_RING;
	}
}
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment