Commit b3a4dbc8 authored by Gabriel Krisman Bertazi, committed by Jens Axboe

io_uring/kbuf: Use slab for struct io_buffer objects

The allocation of struct io_buffer for metadata of provided buffers is
done through a custom allocator that directly gets pages and
fragments them.  But, slab would do just fine, as this is not a hot path
(in fact, it is a deprecated feature) and, by keeping a custom allocator
implementation we lose benefits like tracking, poisoning,
sanitizers. Finally, the custom code is more complex and requires
keeping the list of pages in struct ctx for no good reason.  This patch
cleans this path up and just uses slab.

I microbenchmarked it by forcing the allocation of a large number of
objects with the least number of io_uring commands possible (keeping
nbufs=USHRT_MAX), with and without the patch.  There is a slight
increase in time spent in the allocation with slab, of course, but even
when allocating to system resources exhaustion, which is not very
realistic and happened around 1/2 billion provided buffers for me, it
wasn't a significant hit in system time.  Especially if we think of a
real-world scenario, an application doing register/unregister of
provided buffers will hit ctx->io_buffers_cache more often than actually
going to slab.
Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20231005000531.30800-4-krisman@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f74c746e
...@@ -350,8 +350,6 @@ struct io_ring_ctx { ...@@ -350,8 +350,6 @@ struct io_ring_ctx {
struct wait_queue_head rsrc_quiesce_wq; struct wait_queue_head rsrc_quiesce_wq;
unsigned rsrc_quiesce; unsigned rsrc_quiesce;
struct list_head io_buffers_pages;
#if defined(CONFIG_UNIX) #if defined(CONFIG_UNIX)
struct socket *ring_sock; struct socket *ring_sock;
#endif #endif
......
...@@ -339,7 +339,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ...@@ -339,7 +339,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
spin_lock_init(&ctx->completion_lock); spin_lock_init(&ctx->completion_lock);
spin_lock_init(&ctx->timeout_lock); spin_lock_init(&ctx->timeout_lock);
INIT_WQ_LIST(&ctx->iopoll_list); INIT_WQ_LIST(&ctx->iopoll_list);
INIT_LIST_HEAD(&ctx->io_buffers_pages);
INIT_LIST_HEAD(&ctx->io_buffers_comp); INIT_LIST_HEAD(&ctx->io_buffers_comp);
INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list); INIT_LIST_HEAD(&ctx->timeout_list);
...@@ -4720,6 +4719,9 @@ static int __init io_uring_init(void) ...@@ -4720,6 +4719,9 @@ static int __init io_uring_init(void)
SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU, SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
offsetof(struct io_kiocb, cmd.data), offsetof(struct io_kiocb, cmd.data),
sizeof_field(struct io_kiocb, cmd.data), NULL); sizeof_field(struct io_kiocb, cmd.data), NULL);
io_buf_cachep = kmem_cache_create("io_buffer", sizeof(struct io_buffer), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
NULL);
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
register_sysctl_init("kernel", kernel_io_uring_disabled_table); register_sysctl_init("kernel", kernel_io_uring_disabled_table);
......
...@@ -330,6 +330,7 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx) ...@@ -330,6 +330,7 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
} }
extern struct kmem_cache *req_cachep; extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx) static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{ {
......
...@@ -22,6 +22,8 @@ ...@@ -22,6 +22,8 @@
/* BIDs are addressed by a 16-bit field in a CQE */ /* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16) #define MAX_BIDS_PER_BGID (1 << 16)
struct kmem_cache *io_buf_cachep;
struct io_provide_buf { struct io_provide_buf {
struct file *file; struct file *file;
__u64 addr; __u64 addr;
...@@ -258,6 +260,8 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, ...@@ -258,6 +260,8 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
void io_destroy_buffers(struct io_ring_ctx *ctx) void io_destroy_buffers(struct io_ring_ctx *ctx)
{ {
struct io_buffer_list *bl; struct io_buffer_list *bl;
struct list_head *item, *tmp;
struct io_buffer *buf;
unsigned long index; unsigned long index;
int i; int i;
...@@ -273,12 +277,9 @@ void io_destroy_buffers(struct io_ring_ctx *ctx) ...@@ -273,12 +277,9 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
kfree(bl); kfree(bl);
} }
while (!list_empty(&ctx->io_buffers_pages)) { list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
struct page *page; buf = list_entry(item, struct io_buffer, list);
kmem_cache_free(io_buf_cachep, buf);
page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
list_del_init(&page->lru);
__free_page(page);
} }
} }
...@@ -361,11 +362,12 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe ...@@ -361,11 +362,12 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return 0; return 0;
} }
#define IO_BUFFER_ALLOC_BATCH 64
static int io_refill_buffer_cache(struct io_ring_ctx *ctx) static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{ {
struct io_buffer *buf; struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
struct page *page; int allocated;
int bufs_in_page;
/* /*
* Completions that don't happen inline (eg not under uring_lock) will * Completions that don't happen inline (eg not under uring_lock) will
...@@ -385,22 +387,25 @@ static int io_refill_buffer_cache(struct io_ring_ctx *ctx) ...@@ -385,22 +387,25 @@ static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
/* /*
* No free buffers and no completion entries either. Allocate a new * No free buffers and no completion entries either. Allocate a new
* page worth of buffer entries and add those to our freelist. * batch of buffer entries and add those to our freelist.
*/ */
page = alloc_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
list_add(&page->lru, &ctx->io_buffers_pages); allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
ARRAY_SIZE(bufs), (void **) bufs);
buf = page_address(page); if (unlikely(!allocated)) {
bufs_in_page = PAGE_SIZE / sizeof(*buf); /*
while (bufs_in_page) { * Bulk alloc is all-or-nothing. If we fail to get a batch,
list_add_tail(&buf->list, &ctx->io_buffers_cache); * retry single alloc to be on the safe side.
buf++; */
bufs_in_page--; bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
if (!bufs[0])
return -ENOMEM;
allocated = 1;
} }
while (allocated)
list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment