Commit b1c62645 authored by Jens Axboe

io_uring: recycle provided buffers if request goes async

If we are using provided buffers, it's less than useful to have a buffer
selected and pinned if a request needs to go async or arms poll to get
notified when it can be processed.

Recycle the buffer in those events, so we don't pin it for the duration
of the request.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2be2eb02
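
For readers less familiar with provided buffers: userspace registers a pool of
buffers under a buffer group id (bgid), and the kernel picks one from that group
only when a buffer-selecting request is actually ready to transfer data. The
sketch below shows that flow from userspace via liburing. It is illustrative
only; the group id, buffer size/count, and socket fd are assumptions for the
example and are not part of this change.

/*
 * Minimal userspace sketch of provided-buffer usage via liburing.
 * BGID, BUF_SZ, NR_BUFS and sockfd are illustrative assumptions.
 */
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

#define BGID	1
#define BUF_SZ	4096
#define NR_BUFS	8

int provide_and_recv(int sockfd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *pool;

	if (io_uring_queue_init(8, &ring, 0))
		return -1;
	pool = malloc((size_t)NR_BUFS * BUF_SZ);

	/* Hand NR_BUFS buffers (ids 0..NR_BUFS-1) in group BGID to the kernel. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_provide_buffers(sqe, pool, BUF_SZ, NR_BUFS, BGID, 0);

	/* Recv without naming a buffer; the kernel selects one from group BGID. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv(sqe, sockfd, NULL, BUF_SZ, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;

	io_uring_submit(&ring);

	/* Two completions expected: PROVIDE_BUFFERS and the recv itself. */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->flags & IORING_CQE_F_BUFFER)
			printf("recv of %d bytes used buffer id %u\n", cqe->res,
			       cqe->flags >> IORING_CQE_BUFFER_SHIFT);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	free(pool);
	return 0;
}

Before this patch, a buffer selected for such a request stayed attached (and
pinned) even while the request sat in async work or had poll armed; with
io_kbuf_recycle() below it is returned to its group's list, looked up via the
new bgid field, until the request can actually make progress.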
@@ -269,6 +269,7 @@ struct io_buffer {
 	__u64 addr;
 	__u32 len;
 	__u16 bid;
+	__u16 bgid;
 };
 
 struct io_restriction {
@@ -1351,6 +1352,36 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req,
 	return cflags;
 }
 
+static void io_kbuf_recycle(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_buffer *head, *buf;
+
+	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+		return;
+
+	lockdep_assert_held(&ctx->uring_lock);
+
+	buf = req->kbuf;
+	head = xa_load(&ctx->io_buffers, buf->bgid);
+	if (head) {
+		list_add(&buf->list, &head->list);
+	} else {
+		int ret;
+
+		INIT_LIST_HEAD(&buf->list);
+
+		/* if we fail, just leave buffer attached */
+		ret = xa_insert(&ctx->io_buffers, buf->bgid, buf, GFP_KERNEL);
+		if (unlikely(ret < 0))
+			return;
+	}
+
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	req->kbuf = NULL;
+}
+
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
 	__must_hold(&req->ctx->timeout_lock)
@@ -4763,6 +4794,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
 		buf->addr = addr;
 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
 		buf->bid = bid;
+		buf->bgid = pbuf->bgid;
 		addr += pbuf->len;
 		bid++;
 		if (!*head) {
@@ -7395,8 +7427,12 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		 * Queued up for async execution, worker will release
 		 * submit reference when the iocb is actually submitted.
 		 */
+		io_kbuf_recycle(req);
 		io_queue_async_work(req, NULL);
 		break;
+	case IO_APOLL_OK:
+		io_kbuf_recycle(req);
+		break;
 	}
 
 	if (linked_timeout)