Commit c4bb964f authored by Stefan Roesch, committed by Jens Axboe

io_uring: add tracing for additional CQE32 fields

This adds tracing for the extra1 and extra2 CQE32 fields to the io_uring_complete trace event.
Co-developed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220426182134.136504-10-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e45a3e05
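
For context, extra1 and extra2 are the two additional 64-bit words carried by big CQEs on rings created with IORING_SETUP_CQE32. A minimal sketch of how they appear to userspace, assuming the UAPI struct io_uring_cqe with its trailing big_cqe[] array (dump_cqe32 is a hypothetical helper, not part of this commit):

    #include <stdio.h>
    #include <linux/io_uring.h>	/* struct io_uring_cqe, IORING_SETUP_CQE32 */

    /*
     * On a ring set up with IORING_SETUP_CQE32 each CQE is 32 bytes and
     * carries two extra 64-bit values in big_cqe[0] and big_cqe[1]; these
     * are the values the io_uring_complete trace event now reports as
     * extra1 and extra2.
     */
    static void dump_cqe32(const struct io_uring_cqe *cqe)
    {
    	printf("user_data 0x%llx res %d cflags 0x%x extra1 %llu extra2 %llu\n",
    	       (unsigned long long)cqe->user_data, cqe->res, cqe->flags,
    	       (unsigned long long)cqe->big_cqe[0],
    	       (unsigned long long)cqe->big_cqe[1]);
    }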
@@ -2348,7 +2348,7 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
 	struct io_uring_cqe *cqe;
 
 	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags);
+				req->cqe.res, req->cqe.flags, 0, 0);
 
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
@@ -2372,7 +2372,7 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
 	u64 extra2 = req->extra2;
 
 	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags);
+				req->cqe.res, req->cqe.flags, extra1, extra2);
 
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
@@ -2393,7 +2393,7 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
 
 static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
 {
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags);
+	trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
 
 	return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
 }
@@ -2408,7 +2408,8 @@ static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags
 	if (req->flags & REQ_F_CQE_SKIP)
 		return;
 
-	trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags);
+	trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
+				extra1, extra2);
 
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
@@ -2432,7 +2433,7 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
 	ctx->cq_extra++;
-	trace_io_uring_complete(ctx, NULL, user_data, res, cflags);
+	trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
 	return __io_fill_cqe(ctx, user_data, res, cflags);
 }
...
@@ -321,13 +321,16 @@ TRACE_EVENT(io_uring_fail_link,
  * @user_data:		user data associated with the request
  * @res:		result of the request
  * @cflags:		completion flags
+ * @extra1:		extra 64-bit data for CQE32
+ * @extra2:		extra 64-bit data for CQE32
  *
  */
 TRACE_EVENT(io_uring_complete,
 
-	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags),
+	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
+		 u64 extra1, u64 extra2),
 
-	TP_ARGS(ctx, req, user_data, res, cflags),
+	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
 
 	TP_STRUCT__entry (
 		__field(  void *,	ctx		)
@@ -335,6 +338,8 @@ TRACE_EVENT(io_uring_complete,
 		__field(  u64,		user_data	)
 		__field(  int,		res		)
 		__field(  unsigned,	cflags		)
+		__field(  u64,		extra1		)
+		__field(  u64,		extra2		)
 	),
 
 	TP_fast_assign(
@@ -343,12 +348,17 @@ TRACE_EVENT(io_uring_complete,
 		__entry->user_data	= user_data;
 		__entry->res		= res;
 		__entry->cflags		= cflags;
+		__entry->extra1		= extra1;
+		__entry->extra2		= extra2;
 	),
 
-	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x",
+	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
+		  "extra1 %llu extra2 %llu ",
 		__entry->ctx, __entry->req,
 		__entry->user_data,
-		__entry->res, __entry->cflags)
+		__entry->res, __entry->cflags,
+		(unsigned long long) __entry->extra1,
+		(unsigned long long) __entry->extra2)
 );
 
 /**
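
For reference, with the extended TP_printk format above a completion on a CQE32 ring would show up in the trace output roughly as follows (the hashed ring/req pointers and the numeric values are made up for illustration):

    io_uring_complete: ring 00000000a0b1c2d3, req 00000000d4e5f607, user_data 0x1, result 8, cflags 0x0 extra1 42 extra2 7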