Commit 5280f7e5 authored by Pavel Begunkov, committed by Jens Axboe

io_uring/io-wq: return 2-step work swap scheme

Saving one lock/unlock per item in io-wq is not that important, and it adds
some ugliness to the code. More importantly, on some architectures an atomic
decrement that does not bring the refcount to zero does not provide the
ordering/barriers needed here, so io_steal_work() can easily become subtly
and completely broken.

Bring back the 2-step io-wq work exchange and clean it up.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ea64ec02
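
For context, a minimal userspace sketch (illustration only, not kernel code) of the two schemes the message above refers to: the one-step "steal" exchange being removed and the two-step do_work()/free_work() exchange being restored. All struct and function names below are hypothetical stand-ins for the io-wq callbacks.

	/*
	 * Hypothetical stand-ins for the io-wq callbacks, for illustration only.
	 * Old scheme: do_work() runs an item and may return ("steal") the next one,
	 *             free_work() only drops the reference.
	 * New scheme: do_work() only runs the item, free_work() drops the reference
	 *             and returns the next linked work.
	 */
	struct work {
		struct work *next;		/* next work in a dependent link */
	};

	/* old-style callbacks */
	static struct work *old_do_work(struct work *w)   { return w->next; }	/* run + steal next */
	static void         old_free_work(struct work *w) { (void)w; }		/* drop ref only */

	/* new-style callbacks */
	static void         new_do_work(struct work *w)   { (void)w; }		/* run only */
	static struct work *new_free_work(struct work *w) { return w->next; }	/* drop ref, return next */

	static void run_old(struct work *work)
	{
		do {
			struct work *old_work = work;

			work = old_do_work(old_work);	/* one step: execute and grab the next work */
			old_free_work(old_work);	/* release only after the next one was taken */
		} while (work);
	}

	static void run_new(struct work *work)
	{
		do {
			new_do_work(work);		/* step 1: execute */
			work = new_free_work(work);	/* step 2: put the ref, get the linked work */
		} while (work);
	}
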
@@ -555,23 +555,21 @@ static void io_worker_handle_work(struct io_worker *worker)
 		/* handle a whole dependent link */
 		do {
-			struct io_wq_work *old_work, *next_hashed, *linked;
+			struct io_wq_work *next_hashed, *linked;
 			unsigned int hash = io_get_work_hash(work);
 
 			next_hashed = wq_next_work(work);
 			io_impersonate_work(worker, work);
+			wq->do_work(work);
+			io_assign_current_work(worker, NULL);
 
-			old_work = work;
-			linked = wq->do_work(work);
-
+			linked = wq->free_work(work);
 			work = next_hashed;
 			if (!work && linked && !io_wq_is_hashed(linked)) {
 				work = linked;
 				linked = NULL;
 			}
 			io_assign_current_work(worker, work);
-			wq->free_work(old_work);
-
 			if (linked)
 				io_wqe_enqueue(wqe, linked);
@@ -850,11 +848,9 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
 	struct io_wq *wq = wqe->wq;
 
 	do {
-		struct io_wq_work *old_work = work;
-
 		work->flags |= IO_WQ_WORK_CANCEL;
-		work = wq->do_work(work);
-		wq->free_work(old_work);
+		wq->do_work(work);
+		work = wq->free_work(work);
 	} while (work);
 }
@@ -106,8 +106,8 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
 	return container_of(work->list.next, struct io_wq_work, list);
 }
 
-typedef void (free_work_fn)(struct io_wq_work *);
-typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
+typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work *);
 
 struct io_wq_data {
 	struct user_struct *user;
@@ -2379,22 +2379,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
 		io_free_req_deferred(req);
 }
 
-static struct io_wq_work *io_steal_work(struct io_kiocb *req)
-{
-	struct io_kiocb *nxt;
-
-	/*
-	 * A ref is owned by io-wq in which context we're. So, if that's the
-	 * last one, it's safe to steal next work. False negatives are Ok,
-	 * it just will be re-punted async in io_put_work()
-	 */
-	if (refcount_read(&req->refs) != 1)
-		return NULL;
-
-	nxt = io_req_find_next(req);
-	return nxt ? &nxt->work : NULL;
-}
-
 static void io_double_put_req(struct io_kiocb *req)
 {
 	/* drop both submit and complete references */
@@ -6343,7 +6327,7 @@ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
 	return 0;
 }
 
-static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+static void io_wq_submit_work(struct io_wq_work *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_kiocb *timeout;
@@ -6394,8 +6378,6 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
 		if (lock_ctx)
 			mutex_unlock(&lock_ctx->uring_lock);
 	}
-
-	return io_steal_work(req);
 }
 
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
@@ -8067,12 +8049,12 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
 	return __io_sqe_files_update(ctx, &up, nr_args);
 }
 
-static void io_free_work(struct io_wq_work *work)
+static struct io_wq_work *io_free_work(struct io_wq_work *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 
-	/* Consider that io_steal_work() relies on this ref */
-	io_put_req(req);
+	req = io_put_req_find_next(req);
+	return req ? &req->work : NULL;
 }
 
 static int io_init_wq_offload(struct io_ring_ctx *ctx,