Commit 3b8fdd1d authored by Jens Axboe

io_uring/fdinfo: fix sqe dumping for IORING_SETUP_SQE128

If we have doubly sized SQEs, then we need to shift the sq index by 1
to account for using two entries for a single request. The CQE dumping
gets this right, but the SQE one does not.
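
For illustration, a minimal sketch of the indexing this fix restores (plain C against the uapi header; the sqe_at() helper and its parameters are hypothetical, only the shift-by-one layout comes from the patch below):

	#include <linux/io_uring.h>

	/*
	 * Hypothetical helper: with IORING_SETUP_SQE128 every request occupies
	 * two 64-byte slots in the SQE array, so the logical index read from
	 * sq_array must be shifted left by one (sq_shift == 1) before it is
	 * used; without SQE128, sq_shift stays 0 and the index is used as-is.
	 */
	static inline struct io_uring_sqe *sqe_at(struct io_uring_sqe *sq_sqes,
						  unsigned int sq_idx,
						  unsigned int sq_shift)
	{
		return &sq_sqes[sq_idx << sq_shift];
	}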

Improve the SQE dumping in general: the information dumped is pretty
sparse and doesn't even cover the whole basic part of the SQE. Include
information on the extended part of the SQE, if doubly sized SQEs are
in use. A typical dump now looks like the following:

[...]
SQEs:	32
   32: opcode:URING_CMD, fd:0, flags:1, off:3225964160, addr:0x0, rw_flags:0x0, buf_index:0 user_data:2721, e0:0x0, e1:0xffffb8041000, e2:0x100000000000, e3:0x5500, e4:0x7, e5:0x0, e6:0x0, e7:0x0
   33: opcode:URING_CMD, fd:0, flags:1, off:3225964160, addr:0x0, rw_flags:0x0, buf_index:0 user_data:2722, e0:0x0, e1:0xffffb8043000, e2:0x100000000000, e3:0x5508, e4:0x7, e5:0x0, e6:0x0, e7:0x0
   34: opcode:URING_CMD, fd:0, flags:1, off:3225964160, addr:0x0, rw_flags:0x0, buf_index:0 user_data:2723, e0:0x0, e1:0xffffb8045000, e2:0x100000000000, e3:0x5510, e4:0x7, e5:0x0, e6:0x0, e7:0x0
[...]
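
A quick way to get such a dump to inspect (a sketch assuming liburing 2.2+ and a kernel with IORING_SETUP_SQE128 support; the program only creates the ring, and a 128-byte-SQE workload such as URING_CMD passthrough would fill in the e0..e7 words):

	#include <stdio.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		/* Ask for 128-byte SQEs; their second 64 bytes show up as e0..e7 above. */
		int ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQE128);

		if (ret < 0) {
			fprintf(stderr, "queue_init: %d\n", ret);
			return 1;
		}
		/* Inspect /proc/<pid>/fdinfo/<ring fd> while SQEs are pending. */
		printf("ring fd %d set up with SQE128\n", ring.ring_fd);
		io_uring_queue_exit(&ring);
		return 0;
	}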

Fixes: ebdeb7c0 ("io_uring: add support for 128-byte SQEs")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4f731705
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -60,12 +60,15 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	unsigned int cq_head = READ_ONCE(r->cq.head);
 	unsigned int cq_tail = READ_ONCE(r->cq.tail);
 	unsigned int cq_shift = 0;
+	unsigned int sq_shift = 0;
 	unsigned int sq_entries, cq_entries;
 	bool has_lock;
 	unsigned int i;
 
 	if (ctx->flags & IORING_SETUP_CQE32)
 		cq_shift = 1;
+	if (ctx->flags & IORING_SETUP_SQE128)
+		sq_shift = 1;
 
 	/*
 	 * we may get imprecise sqe and cqe info if uring is actively running
@@ -81,19 +84,36 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	seq_printf(m, "CqHead:\t%u\n", cq_head);
 	seq_printf(m, "CqTail:\t%u\n", cq_tail);
 	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
-	seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
+	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
 	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
 	for (i = 0; i < sq_entries; i++) {
 		unsigned int entry = i + sq_head;
-		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
 		struct io_uring_sqe *sqe;
+		unsigned int sq_idx;
 
+		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
 		if (sq_idx > sq_mask)
 			continue;
-		sqe = &ctx->sq_sqes[sq_idx];
-		seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
-			   sq_idx, sqe->opcode, sqe->fd, sqe->flags,
-			   sqe->user_data);
+		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
+		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
+			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
+			      "user_data:%llu",
+			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
+			   sqe->flags, (unsigned long long) sqe->off,
+			   (unsigned long long) sqe->addr, sqe->rw_flags,
+			   sqe->buf_index, sqe->user_data);
+		if (sq_shift) {
+			u64 *sqeb = (void *) (sqe + 1);
+			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
+			int j;
+
+			for (j = 0; j < size; j++) {
+				seq_printf(m, ", e%d:0x%llx", j,
+					   (unsigned long long) *sqeb);
+				sqeb++;
+			}
+		}
+		seq_printf(m, "\n");
 	}
 	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
 	cq_entries = min(cq_tail - cq_head, ctx->cq_entries);