Commit 48863ffd authored by Pavel Begunkov, committed by Jens Axboe

io_uring: clean up tracing events

We have lots of trace events accepting an io_uring request and wanting
to print some of its fields like user_data, opcode, flags and so on.
However, as trace points were unaware of io_uring structures, we had to
pass all the fields as arguments. Teach trace/events/io_uring.h about
struct io_kiocb and stop the misery of passing a horde of arguments to
trace helpers.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/40ff72f92798114e56d400f2b003beb6cde6ef53.1655384063.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ab1c84d8
This diff is collapsed.
...@@ -452,9 +452,7 @@ void io_queue_iowq(struct io_kiocb *req, bool *dont_use) ...@@ -452,9 +452,7 @@ void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
if (WARN_ON_ONCE(!same_thread_group(req->task, current))) if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
req->work.flags |= IO_WQ_WORK_CANCEL; req->work.flags |= IO_WQ_WORK_CANCEL;
trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data, trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
req->opcode, req->flags, &req->work,
io_wq_is_hashed(&req->work));
io_wq_enqueue(tctx->io_wq, &req->work); io_wq_enqueue(tctx->io_wq, &req->work);
if (link) if (link)
io_queue_linked_timeout(link); io_queue_linked_timeout(link);
...@@ -1583,7 +1581,7 @@ static __cold void io_drain_req(struct io_kiocb *req) ...@@ -1583,7 +1581,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
goto queue; goto queue;
} }
trace_io_uring_defer(ctx, req, req->cqe.user_data, req->opcode); trace_io_uring_defer(req);
de->req = req; de->req = req;
de->seq = seq; de->seq = seq;
list_add_tail(&de->list, &ctx->defer_list); list_add_tail(&de->list, &ctx->defer_list);
...@@ -1783,7 +1781,7 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd) ...@@ -1783,7 +1781,7 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
{ {
struct file *file = fget(fd); struct file *file = fget(fd);
trace_io_uring_file_get(req->ctx, req, req->cqe.user_data, fd); trace_io_uring_file_get(req, fd);
/* we don't allow fixed io_uring files */ /* we don't allow fixed io_uring files */
if (file && io_is_uring_fops(file)) if (file && io_is_uring_fops(file))
...@@ -2006,7 +2004,7 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, ...@@ -2006,7 +2004,7 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
struct io_submit_link *link = &ctx->submit_state.link; struct io_submit_link *link = &ctx->submit_state.link;
struct io_kiocb *head = link->head; struct io_kiocb *head = link->head;
trace_io_uring_req_failed(sqe, ctx, req, ret); trace_io_uring_req_failed(sqe, req, ret);
/* /*
* Avoid breaking links in the middle as it renders links with SQPOLL * Avoid breaking links in the middle as it renders links with SQPOLL
...@@ -2048,9 +2046,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2048,9 +2046,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return io_submit_fail_init(sqe, req, ret); return io_submit_fail_init(sqe, req, ret);
/* don't need @sqe from now on */ /* don't need @sqe from now on */
trace_io_uring_submit_sqe(ctx, req, req->cqe.user_data, req->opcode, trace_io_uring_submit_sqe(req, true);
req->flags, true,
ctx->flags & IORING_SETUP_SQPOLL);
/* /*
* If we already have a head request, queue this one for async * If we already have a head request, queue this one for async
...@@ -2064,7 +2060,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2064,7 +2060,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(ret)) if (unlikely(ret))
return io_submit_fail_init(sqe, req, ret); return io_submit_fail_init(sqe, req, ret);
trace_io_uring_link(ctx, req, link->head); trace_io_uring_link(req, link->head);
link->last->link = req; link->last->link = req;
link->last = req; link->last = req;
......
...@@ -288,7 +288,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, ...@@ -288,7 +288,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask,
else else
req->io_task_work.func = io_apoll_task_func; req->io_task_work.func = io_apoll_task_func;
trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask); trace_io_uring_task_add(req, mask);
io_req_task_work_add(req); io_req_task_work_add(req);
} }
...@@ -558,8 +558,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) ...@@ -558,8 +558,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
if (ret || ipt.error) if (ret || ipt.error)
return ret ? IO_APOLL_READY : IO_APOLL_ABORTED; return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode, trace_io_uring_poll_arm(req, mask, apoll->poll.events);
mask, apoll->poll.events);
return IO_APOLL_OK; return IO_APOLL_OK;
} }
......
...@@ -115,8 +115,7 @@ static void io_fail_links(struct io_kiocb *req) ...@@ -115,8 +115,7 @@ static void io_fail_links(struct io_kiocb *req)
nxt = link->link; nxt = link->link;
link->link = NULL; link->link = NULL;
trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data, trace_io_uring_fail_link(req, link);
req->opcode, link);
if (ignore_cqes) if (ignore_cqes)
link->flags |= REQ_F_CQE_SKIP; link->flags |= REQ_F_CQE_SKIP;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment