Commit 4a0fef62 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: optimize io_uring_task layout

task_work bits of io_uring_task are split into two cache lines causing
extra cache bouncing, place them into a separate cache line. Also move
the most used submission path fields closer together, so they are hot.

Cc: stable@vger.kernel.org # 5.15+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bce5d70c
...@@ -7,22 +7,24 @@ ...@@ -7,22 +7,24 @@
/*
 * Per-task io_uring state. Layout is cache-conscious (see commit message):
 * the submission-path fields used most often are grouped at the top, and
 * the task_work machinery lives in its own SMP cache line so that
 * task_work activity does not bounce the cache line holding the
 * submission-side fields.
 */
struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx	*last;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_idle;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		spinlock_t		task_lock;
		bool			task_running;
		struct io_wq_work_list	task_list;
		struct io_wq_work_list	prio_task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};
struct io_tctx_node { struct io_tctx_node {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment