Commit 88f171ab authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: wait potential ->release() on resurrect

There is a short window where percpu_refs are already turned zero, but
we try to do resurrect(). Play nicer and wait for ->release() to happen
in this case and proceed as if everything is ok. One downside for ctx refs
is that we can ignore signal_pending() on a rare occasion, but someone
else should check for it later if needed.

Cc: <stable@vger.kernel.org> # 5.5+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2303b1f
...@@ -1104,6 +1104,21 @@ static inline void io_set_resource_node(struct io_kiocb *req) ...@@ -1104,6 +1104,21 @@ static inline void io_set_resource_node(struct io_kiocb *req)
} }
} }
/*
 * Try to bring a quiesced percpu ref back to life.
 *
 * Returns true when the ref was still live (tryget succeeded) and has been
 * resurrected, with @compl re-armed for the next quiesce cycle.
 *
 * Returns false when the ref already hit zero: in that case ->release() is
 * either done (completion already posted) or still in flight, so wait for an
 * RCU grace period to let it finish before the caller proceeds.
 */
static bool io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	if (percpu_ref_tryget(ref)) {
		/* still alive: undo the kill and re-arm the completion */
		percpu_ref_resurrect(ref);
		reinit_completion(compl);
		percpu_ref_put(ref);
		return true;
	}

	/* already at zero — make sure ->release() has run to the end */
	if (!try_wait_for_completion(compl))
		synchronize_rcu();
	return false;
}
static bool io_match_task(struct io_kiocb *head, static bool io_match_task(struct io_kiocb *head,
struct task_struct *task, struct task_struct *task,
struct files_struct *files) struct files_struct *files)
...@@ -7329,13 +7344,11 @@ static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data, ...@@ -7329,13 +7344,11 @@ static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
flush_delayed_work(&ctx->rsrc_put_work); flush_delayed_work(&ctx->rsrc_put_work);
ret = wait_for_completion_interruptible(&data->done); ret = wait_for_completion_interruptible(&data->done);
if (!ret) if (!ret || !io_refs_resurrect(&data->refs, &data->done))
break; break;
percpu_ref_resurrect(&data->refs);
io_sqe_rsrc_set_node(ctx, data, backup_node); io_sqe_rsrc_set_node(ctx, data, backup_node);
backup_node = NULL; backup_node = NULL;
reinit_completion(&data->done);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
ret = io_run_task_work_sig(); ret = io_run_task_work_sig();
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
...@@ -10070,10 +10083,8 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ...@@ -10070,10 +10083,8 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
if (ret) { if (ret && io_refs_resurrect(&ctx->refs, &ctx->ref_comp))
percpu_ref_resurrect(&ctx->refs); return ret;
goto out_quiesce;
}
} }
if (ctx->restricted) { if (ctx->restricted) {
...@@ -10165,7 +10176,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ...@@ -10165,7 +10176,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
if (io_register_op_must_quiesce(opcode)) { if (io_register_op_must_quiesce(opcode)) {
/* bring the ctx back to life */ /* bring the ctx back to life */
percpu_ref_reinit(&ctx->refs); percpu_ref_reinit(&ctx->refs);
out_quiesce:
reinit_completion(&ctx->ref_comp); reinit_completion(&ctx->ref_comp);
} }
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment