Commit 9b797a37 authored by Jens Axboe

io_uring: add abstraction around apoll cache

In preparation for adding limits, and one more user, abstract out the
core bits of the allocation+free cache.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9da7471e
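
For readers unfamiliar with the pattern being factored out: the diff below adds a new alloc_cache.h header (the IOU_ALLOC_CACHE_H block), switches the io_ring_ctx apoll_cache field and its users in io_uring.c over to it, and converts the poll code so struct async_poll embeds the cache entry. The following is a minimal userspace sketch of the same intrusive allocation+free cache idea; it is illustrative only, all names (alloc_cache, cache_entry, my_obj) are invented for the example, and it uses a plain singly linked list plus a local container_of() macro instead of the kernel's hlist helpers. It is not part of the patch.

/*
 * Minimal userspace sketch of the intrusive alloc/free cache pattern this
 * commit abstracts out.  Names are illustrative, not from the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct cache_entry {
	struct cache_entry *next;	/* stands in for struct hlist_node */
};

struct alloc_cache {
	struct cache_entry *head;	/* stands in for struct hlist_head */
};

/* Recover the outer object from a pointer to its embedded entry. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void cache_init(struct alloc_cache *cache)
{
	cache->head = NULL;
}

static void cache_put(struct alloc_cache *cache, struct cache_entry *entry)
{
	entry->next = cache->head;
	cache->head = entry;
}

static struct cache_entry *cache_get(struct alloc_cache *cache)
{
	struct cache_entry *entry = cache->head;

	if (entry)
		cache->head = entry->next;
	return entry;
}

static void cache_free_all(struct alloc_cache *cache,
			   void (*free_fn)(struct cache_entry *))
{
	struct cache_entry *entry;

	while ((entry = cache_get(cache)) != NULL)
		free_fn(entry);
}

/* A user embeds a cache_entry, much like async_poll embeds io_cache_entry. */
struct my_obj {
	struct cache_entry cache;
	int payload;
};

static void my_obj_free(struct cache_entry *entry)
{
	free(container_of(entry, struct my_obj, cache));
}

int main(void)
{
	struct alloc_cache cache;
	struct cache_entry *entry;
	struct my_obj *obj;

	cache_init(&cache);

	/* "Allocate": try the cache first, fall back to malloc(). */
	entry = cache_get(&cache);
	obj = entry ? container_of(entry, struct my_obj, cache)
		    : malloc(sizeof(*obj));
	if (!obj)
		return 1;
	obj->payload = 42;
	printf("payload %d\n", obj->payload);

	/* "Free": recycle the object into the cache instead of freeing it. */
	cache_put(&cache, &obj->cache);

	/* Teardown: drain the cache and really free everything. */
	cache_free_all(&cache, my_obj_free);
	return 0;
}

The point of the container_of() step is that the cache only stores the embedded entry, so any object type can be recycled without the cache knowing its layout; the patch applies the same trick by placing struct io_cache_entry in a union inside struct async_poll.
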
@@ -158,6 +158,10 @@ struct io_ev_fd {
 	struct rcu_head rcu;
 };
 
+struct io_alloc_cache {
+	struct hlist_head list;
+};
+
 struct io_ring_ctx {
 	/* const or read-mostly hot data */
 	struct {
@@ -216,7 +220,7 @@ struct io_ring_ctx {
 		struct io_hash_table cancel_table_locked;
 		struct list_head cq_overflow_list;
-		struct list_head apoll_cache;
+		struct io_alloc_cache apoll_cache;
 		struct xarray personalities;
 		u32 pers_next;
 	} ____cacheline_aligned_in_smp;
...
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

struct io_cache_entry {
	struct hlist_node node;
};

static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	hlist_add_head(&entry->node, &cache->list);
}

static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (!hlist_empty(&cache->list)) {
		struct hlist_node *node = cache->list.first;

		hlist_del(node);
		return container_of(node, struct io_cache_entry, node);
	}

	return NULL;
}

static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
{
	INIT_HLIST_HEAD(&cache->list);
}

static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(struct io_cache_entry *))
{
	while (!hlist_empty(&cache->list)) {
		struct hlist_node *node = cache->list.first;

		hlist_del(node);
		free(container_of(node, struct io_cache_entry, node));
	}
}
#endif
@@ -92,6 +92,7 @@
 #include "timeout.h"
 #include "poll.h"
+#include "alloc_cache.h"
 
 #define IORING_MAX_ENTRIES 32768
 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
@@ -295,7 +296,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->sqd_list);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
-	INIT_LIST_HEAD(&ctx->apoll_cache);
+	io_alloc_cache_init(&ctx->apoll_cache);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
@@ -1180,8 +1181,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 				if (apoll->double_poll)
 					kfree(apoll->double_poll);
-				list_add(&apoll->poll.wait.entry,
-					 &ctx->apoll_cache);
+				io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache);
 				req->flags &= ~REQ_F_POLLED;
 			}
 			if (req->flags & IO_REQ_LINK_FLAGS)
@@ -2467,7 +2467,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
 	io_eventfd_unregister(ctx);
-	io_flush_apoll_cache(ctx);
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
...
@@ -590,16 +590,15 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 					     unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_cache_entry *entry;
 	struct async_poll *apoll;
 
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   !list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-					 poll.wait.entry);
-		list_del_init(&apoll->poll.wait.entry);
+		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
+		apoll = container_of(entry, struct async_poll, cache);
 	} else {
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
@@ -960,14 +959,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
-void io_flush_apoll_cache(struct io_ring_ctx *ctx)
+void io_apoll_cache_free(struct io_cache_entry *entry)
 {
-	struct async_poll *apoll;
-
-	while (!list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-					 poll.wait.entry);
-		list_del(&apoll->poll.wait.entry);
-		kfree(apoll);
-	}
+	kfree(container_of(entry, struct async_poll, cache));
 }
 // SPDX-License-Identifier: GPL-2.0
 
+#include "alloc_cache.h"
+
 enum {
 	IO_APOLL_OK,
 	IO_APOLL_ABORTED,
@@ -14,7 +16,10 @@ struct io_poll {
 };
 
 struct async_poll {
-	struct io_poll poll;
+	union {
+		struct io_poll poll;
+		struct io_cache_entry cache;
+	};
 	struct io_poll *double_poll;
 };
 
@@ -31,4 +36,4 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
 bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			bool cancel_all);
 
-void io_flush_apoll_cache(struct io_ring_ctx *ctx);
+void io_apoll_cache_free(struct io_cache_entry *entry);