Commit de9b4cca authored by Jens Axboe

io_uring: wrap io_kiocb reference count manipulation in helpers

No functional changes in this patch, just in preparation for handling the
references a bit more efficiently.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 179ae0d1
...@@ -1470,6 +1470,31 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, ...@@ -1470,6 +1470,31 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
return ret; return ret;
} }
/*
 * Try to take a reference on @req, failing if the refcount has already
 * dropped to zero (i.e. the request may be going away). Thin wrapper
 * around the kernel refcount API so all io_kiocb refcount manipulation
 * goes through one place. Returns true if a reference was taken.
 */
static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	return refcount_inc_not_zero(&req->refs);
}
/*
 * Drop @refs references from @req at once (e.g. both the submission and
 * completion references). Returns true if this dropped the count to zero,
 * in which case the caller owns freeing the request.
 */
static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
{
	return refcount_sub_and_test(refs, &req->refs);
}
/*
 * Drop a single reference from @req. Returns true if it was the last
 * reference, in which case the caller owns freeing the request.
 */
static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	return refcount_dec_and_test(&req->refs);
}
/*
 * Drop a reference from @req without testing for zero — only valid when
 * the caller knows this cannot be the final reference.
 */
static inline void req_ref_put(struct io_kiocb *req)
{
	refcount_dec(&req->refs);
}
/*
 * Take an additional reference on @req — only valid when the caller
 * already holds at least one reference (refcount_inc warns on zero).
 */
static inline void req_ref_get(struct io_kiocb *req)
{
	refcount_inc(&req->refs);
}
static void __io_cqring_fill_event(struct io_kiocb *req, long res, static void __io_cqring_fill_event(struct io_kiocb *req, long res,
unsigned int cflags) unsigned int cflags)
{ {
...@@ -1506,7 +1531,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, ...@@ -1506,7 +1531,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
io_clean_op(req); io_clean_op(req);
req->result = res; req->result = res;
req->compl.cflags = cflags; req->compl.cflags = cflags;
refcount_inc(&req->refs); req_ref_get(req);
list_add_tail(&req->compl.list, &ctx->cq_overflow_list); list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
} }
} }
...@@ -1528,7 +1553,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res, ...@@ -1528,7 +1553,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
* If we're the last reference to this request, add to our locked * If we're the last reference to this request, add to our locked
* free_list cache. * free_list cache.
*/ */
if (refcount_dec_and_test(&req->refs)) { if (req_ref_put_and_test(req)) {
struct io_comp_state *cs = &ctx->submit_state.comp; struct io_comp_state *cs = &ctx->submit_state.comp;
if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
...@@ -2108,7 +2133,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs, ...@@ -2108,7 +2133,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
req = cs->reqs[i]; req = cs->reqs[i];
/* submission and completion refs */ /* submission and completion refs */
if (refcount_sub_and_test(2, &req->refs)) if (req_ref_sub_and_test(req, 2))
io_req_free_batch(&rb, req, &ctx->submit_state); io_req_free_batch(&rb, req, &ctx->submit_state);
} }
...@@ -2124,7 +2149,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) ...@@ -2124,7 +2149,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{ {
struct io_kiocb *nxt = NULL; struct io_kiocb *nxt = NULL;
if (refcount_dec_and_test(&req->refs)) { if (req_ref_put_and_test(req)) {
nxt = io_req_find_next(req); nxt = io_req_find_next(req);
__io_free_req(req); __io_free_req(req);
} }
...@@ -2133,7 +2158,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) ...@@ -2133,7 +2158,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
static void io_put_req(struct io_kiocb *req) static void io_put_req(struct io_kiocb *req)
{ {
if (refcount_dec_and_test(&req->refs)) if (req_ref_put_and_test(req))
io_free_req(req); io_free_req(req);
} }
...@@ -2156,14 +2181,14 @@ static void io_free_req_deferred(struct io_kiocb *req) ...@@ -2156,14 +2181,14 @@ static void io_free_req_deferred(struct io_kiocb *req)
static inline void io_put_req_deferred(struct io_kiocb *req, int refs) static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
{ {
if (refcount_sub_and_test(refs, &req->refs)) if (req_ref_sub_and_test(req, refs))
io_free_req_deferred(req); io_free_req_deferred(req);
} }
static void io_double_put_req(struct io_kiocb *req) static void io_double_put_req(struct io_kiocb *req)
{ {
/* drop both submit and complete references */ /* drop both submit and complete references */
if (refcount_sub_and_test(2, &req->refs)) if (req_ref_sub_and_test(req, 2))
io_free_req(req); io_free_req(req);
} }
...@@ -2249,7 +2274,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, ...@@ -2249,7 +2274,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
__io_cqring_fill_event(req, req->result, cflags); __io_cqring_fill_event(req, req->result, cflags);
(*nr_events)++; (*nr_events)++;
if (refcount_dec_and_test(&req->refs)) if (req_ref_put_and_test(req))
io_req_free_batch(&rb, req, &ctx->submit_state); io_req_free_batch(&rb, req, &ctx->submit_state);
} }
...@@ -2464,7 +2489,7 @@ static bool io_rw_reissue(struct io_kiocb *req) ...@@ -2464,7 +2489,7 @@ static bool io_rw_reissue(struct io_kiocb *req)
lockdep_assert_held(&req->ctx->uring_lock); lockdep_assert_held(&req->ctx->uring_lock);
if (io_resubmit_prep(req)) { if (io_resubmit_prep(req)) {
refcount_inc(&req->refs); req_ref_get(req);
io_queue_async_work(req); io_queue_async_work(req);
return true; return true;
} }
...@@ -3169,7 +3194,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, ...@@ -3169,7 +3194,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
list_del_init(&wait->entry); list_del_init(&wait->entry);
/* submit ref gets dropped, acquire a new one */ /* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs); req_ref_get(req);
io_req_task_queue(req); io_req_task_queue(req);
return 1; return 1;
} }
...@@ -4893,7 +4918,7 @@ static void io_poll_remove_double(struct io_kiocb *req) ...@@ -4893,7 +4918,7 @@ static void io_poll_remove_double(struct io_kiocb *req)
spin_lock(&head->lock); spin_lock(&head->lock);
list_del_init(&poll->wait.entry); list_del_init(&poll->wait.entry);
if (poll->wait.private) if (poll->wait.private)
refcount_dec(&req->refs); req_ref_put(req);
poll->head = NULL; poll->head = NULL;
spin_unlock(&head->lock); spin_unlock(&head->lock);
} }
...@@ -4959,7 +4984,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, ...@@ -4959,7 +4984,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
poll->wait.func(&poll->wait, mode, sync, key); poll->wait.func(&poll->wait, mode, sync, key);
} }
} }
refcount_dec(&req->refs); req_ref_put(req);
return 1; return 1;
} }
...@@ -5002,7 +5027,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, ...@@ -5002,7 +5027,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
return; return;
} }
io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
refcount_inc(&req->refs); req_ref_get(req);
poll->wait.private = req; poll->wait.private = req;
*poll_ptr = poll; *poll_ptr = poll;
} }
...@@ -6142,7 +6167,7 @@ static void io_wq_submit_work(struct io_wq_work *work) ...@@ -6142,7 +6167,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
/* avoid locking problems by failing it from a clean context */ /* avoid locking problems by failing it from a clean context */
if (ret) { if (ret) {
/* io-wq is going to take one down */ /* io-wq is going to take one down */
refcount_inc(&req->refs); req_ref_get(req);
io_req_task_queue_fail(req, ret); io_req_task_queue_fail(req, ret);
} }
} }
...@@ -6200,7 +6225,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) ...@@ -6200,7 +6225,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
* We don't expect the list to be empty, that will only happen if we * We don't expect the list to be empty, that will only happen if we
* race with the completion of the linked work. * race with the completion of the linked work.
*/ */
if (prev && refcount_inc_not_zero(&prev->refs)) if (prev && req_ref_inc_not_zero(prev))
io_remove_next_linked(prev); io_remove_next_linked(prev);
else else
prev = NULL; prev = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.