Commit d430adfe authored by Linus Torvalds

Merge tag 'io_uring-5.11-2021-01-10' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "A bit larger than I had hoped at this point, but it's all changes that
  will be directed towards stable anyway. In detail:

   - Fix a merge window regression on error return (Matthew)

   - Remove useless variable declaration/assignment (Ye Bin)

   - IOPOLL fixes (Pavel)

   - Exit and cancelation fixes (Pavel)

   - fasync lockdep complaint fix (Pavel)

   - Ensure SQPOLL is synchronized with creator life time (Pavel)"

* tag 'io_uring-5.11-2021-01-10' of git://git.kernel.dk/linux-block:
  io_uring: stop SQPOLL submit on creator's death
  io_uring: add warn_once for io_uring_flush()
  io_uring: inline io_uring_attempt_task_drop()
  io_uring: io_rw_reissue lockdep annotations
  io_uring: synchronise ev_posted() with waitqueues
  io_uring: dont kill fasync under completion_lock
  io_uring: trigger eventfd for IOPOLL
  io_uring: Fix return value from alloc_fixed_file_ref_node
  io_uring: Delete useless variable ‘id’ in io_prep_async_work
  io_uring: cancel more aggressively in exit_work
  io_uring: drop file refs after task cancel
  io_uring: patch up IOPOLL overflow_flush sync
  io_uring: synchronise IOPOLL on task_submit fail
parents 28318f53 d9d05217
@@ -262,6 +262,7 @@ struct io_ring_ctx {
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
+		unsigned int		sqo_dead: 1;

 		/*
 		 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -992,6 +993,9 @@ enum io_mem_account {
 	ACCT_PINNED,
 };

+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+					    struct task_struct *task);
+
 static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
 static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
 			struct io_ring_ctx *ctx);
@@ -1342,11 +1346,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)

 	/* order cqe stores with ring update */
 	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
-	if (wq_has_sleeper(&ctx->cq_wait)) {
-		wake_up_interruptible(&ctx->cq_wait);
-		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-	}
 }

 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1520,10 +1519,8 @@ static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_identity *id;

 	io_req_init_async(req);
-	id = req->work.identity;

 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -1704,18 +1701,42 @@ static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)

 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
+	/* see waitqueue_active() comment */
+	smp_mb();
+
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
 		wake_up(&ctx->sq_data->wait);
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
+	if (waitqueue_active(&ctx->cq_wait)) {
+		wake_up_interruptible(&ctx->cq_wait);
+		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+	}
+}
+
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+{
+	/* see waitqueue_active() comment */
+	smp_mb();
+
+	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		if (waitqueue_active(&ctx->wait))
+			wake_up(&ctx->wait);
+	}
+	if (io_should_trigger_evfd(ctx))
+		eventfd_signal(ctx->cq_ev_fd, 1);
+	if (waitqueue_active(&ctx->cq_wait)) {
+		wake_up_interruptible(&ctx->cq_wait);
+		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+	}
 }

 /* Returns true if there are no backlogged entries after the flush */
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
-				     struct task_struct *tsk,
-				     struct files_struct *files)
+static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+				       struct task_struct *tsk,
+				       struct files_struct *files)
 {
 	struct io_rings *rings = ctx->rings;
 	struct io_kiocb *req, *tmp;
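The smp_mb() calls added above implement the waker side of the pattern documented at waitqueue_active(): the store that publishes the event must be ordered before the lockless sleeper check, pairing with the barrier implied by prepare_to_wait()/set_current_state() on the waiter side. A minimal kernel-style sketch of that shape (the event_posted flag and post_event() helper are illustrative, not part of this patch):

static bool event_posted;

static void post_event(struct wait_queue_head *wq)
{
	/* publish the event first... */
	WRITE_ONCE(event_posted, true);
	/* ...then order the store before the lockless sleeper check */
	smp_mb();
	if (waitqueue_active(wq))
		wake_up(wq);
}

Without the barrier, the waitqueue_active() load can be reordered before the store, so a waiter that observes event_posted == false just before sleeping may miss its wakeup.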
@@ -1768,6 +1789,20 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return all_flushed;
 }

+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+				     struct task_struct *tsk,
+				     struct files_struct *files)
+{
+	if (test_bit(0, &ctx->cq_check_overflow)) {
+		/* iopoll syncs against uring_lock, not completion_lock */
+		if (ctx->flags & IORING_SETUP_IOPOLL)
+			mutex_lock(&ctx->uring_lock);
+		__io_cqring_overflow_flush(ctx, force, tsk, files);
+		if (ctx->flags & IORING_SETUP_IOPOLL)
+			mutex_unlock(&ctx->uring_lock);
+	}
+}
+
 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
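The split above follows a common kernel convention that the rest of this diff relies on: the double-underscore __io_cqring_overflow_flush() assumes the caller already satisfies the locking requirements, while the plain-named wrapper peeks at the overflow bit and takes uring_lock only for IOPOLL rings. A compilable userspace sketch of the same shape, with all names hypothetical:

#include <pthread.h>
#include <stdbool.h>

/* hypothetical context mirroring the wrapper/worker split above */
struct flush_ctx {
	pthread_mutex_t lock;
	bool polled;	/* stands in for IORING_SETUP_IOPOLL */
	int backlog;	/* stands in for cq_check_overflow */
};

/* caller holds c->lock when c->polled is set */
static void __flush_backlog(struct flush_ctx *c)
{
	c->backlog = 0;
}

static void flush_backlog(struct flush_ctx *c)
{
	if (c->backlog) {	/* unlocked peek, like test_bit() above */
		if (c->polled)
			pthread_mutex_lock(&c->lock);
		__flush_backlog(c);
		if (c->polled)
			pthread_mutex_unlock(&c->lock);
	}
}

Callers that already hold the lock (or run before the ring is visible to anyone else) call the __ variant directly, as io_ring_ctx_wait_and_kill() does later in this diff.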
@@ -2127,14 +2162,14 @@ static void __io_req_task_submit(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;

-	if (!__io_sq_thread_acquire_mm(ctx) &&
-	    !__io_sq_thread_acquire_files(ctx)) {
-		mutex_lock(&ctx->uring_lock);
+	mutex_lock(&ctx->uring_lock);
+	if (!ctx->sqo_dead &&
+	    !__io_sq_thread_acquire_mm(ctx) &&
+	    !__io_sq_thread_acquire_files(ctx))
 		__io_queue_sqe(req, NULL);
-		mutex_unlock(&ctx->uring_lock);
-	} else {
+	else
 		__io_req_task_cancel(req, -EFAULT);
-	}
+	mutex_unlock(&ctx->uring_lock);
 }

 static void io_req_task_submit(struct callback_head *cb)
@@ -2313,20 +2348,8 @@ static void io_double_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }

-static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
-	if (test_bit(0, &ctx->cq_check_overflow)) {
-		/*
-		 * noflush == true is from the waitqueue handler, just ensure
-		 * we wake up the task, and the next invocation will flush the
-		 * entries. We cannot safely to it from here.
-		 */
-		if (noflush)
-			return -1U;
-
-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
-	}
-
 	/* See comment at the top of this file */
 	smp_rmb();
 	return __io_cqring_events(ctx);
@@ -2424,8 +2447,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	}

 	io_commit_cqring(ctx);
-	if (ctx->flags & IORING_SETUP_SQPOLL)
-		io_cqring_ev_posted(ctx);
+	io_cqring_ev_posted_iopoll(ctx);
 	io_req_free_batch_finish(ctx, &rb);

 	if (!list_empty(&again))
@@ -2551,7 +2573,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	 * If we do, we can potentially be spinning for commands that
 	 * already triggered a CQE (eg in error).
 	 */
-	if (io_cqring_events(ctx, false))
+	if (test_bit(0, &ctx->cq_check_overflow))
+		__io_cqring_overflow_flush(ctx, false, NULL, NULL);
+	if (io_cqring_events(ctx))
 		break;

 	/*
@@ -2668,6 +2692,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
 		return false;

+	lockdep_assert_held(&req->ctx->uring_lock);
+
 	ret = io_sq_thread_acquire_mm_files(req->ctx, req);

 	if (io_resubmit_prep(req, ret)) {
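The lockdep_assert_held() added above turns the locking rule from the new wrapper's comment ("iopoll syncs against uring_lock") into a checkable assertion: with CONFIG_LOCKDEP=y it splats if the current context does not hold the lock, and it compiles to nothing otherwise. A kernel-style sketch with hypothetical names:

struct frob_ctx {
	spinlock_t lock;
	struct list_head queue;
};

/* caller must hold ctx->lock; enforced only under CONFIG_LOCKDEP */
static void frob_requeue(struct frob_ctx *ctx, struct list_head *entry)
{
	lockdep_assert_held(&ctx->lock);
	list_move_tail(entry, &ctx->queue);
}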
@@ -6826,7 +6852,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	/* if we have a backlog and couldn't flush it all, return BUSY */
 	if (test_bit(0, &ctx->sq_check_overflow)) {
-		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
+		if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
 			return -EBUSY;
 	}

@@ -6928,7 +6954,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 		if (!list_empty(&ctx->iopoll_list))
 			io_do_iopoll(ctx, &nr_events, 0);

-		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (to_submit && !ctx->sqo_dead &&
+		    likely(!percpu_ref_is_dying(&ctx->refs)))
 			ret = io_submit_sqes(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 	}
@@ -7089,7 +7116,7 @@ struct io_wait_queue {
 	unsigned nr_timeouts;
 };

-static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
 	struct io_ring_ctx *ctx = iowq->ctx;

@@ -7098,7 +7125,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
 	 * started waiting. For timeouts, we always want to return to userspace,
 	 * regardless of event count.
 	 */
-	return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
+	return io_cqring_events(ctx) >= iowq->to_wait ||
 			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
 }

@@ -7108,11 +7135,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
 							wq);

-	/* use noflush == true, as we can't safely rely on locking context */
-	if (!io_should_wake(iowq, true))
-		return -1;
-
-	return autoremove_wake_function(curr, mode, wake_flags, key);
+	/*
+	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
+	 * the task, and the next invocation will do it.
+	 */
+	if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
+		return autoremove_wake_function(curr, mode, wake_flags, key);
+	return -1;
 }

 static int io_run_task_work_sig(void)
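io_wake_function() is a custom wake callback installed on the CQ waitqueue; the rewrite above wakes the task not only when enough CQEs have arrived but also when the overflow list needs flushing, since the flush cannot be done from the waitqueue handler itself. As a rough kernel-style sketch of the callback contract (struct my_waiter and its ready flag are hypothetical):

struct my_waiter {
	struct wait_queue_entry wq;
	bool ready;
};

static int my_wake(struct wait_queue_entry *curr, unsigned int mode,
		   int wake_flags, void *key)
{
	struct my_waiter *w = container_of(curr, struct my_waiter, wq);

	if (!READ_ONCE(w->ready))
		return -1;	/* leave the task asleep */
	/* wake the task and remove the entry from the queue */
	return autoremove_wake_function(curr, mode, wake_flags, key);
}

A negative return makes __wake_up_common() stop without waking this waiter, which is how the old code swallowed wakeups it could not act on.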
@@ -7149,7 +7178,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	int ret = 0;

 	do {
-		if (io_cqring_events(ctx, false) >= min_events)
+		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		if (io_cqring_events(ctx) >= min_events)
 			return 0;
 		if (!io_run_task_work())
 			break;
@@ -7177,6 +7207,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
+		io_cqring_overflow_flush(ctx, false, NULL, NULL);
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		/* make sure we run task_work before checking for signals */
@@ -7185,8 +7216,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 			continue;
 		else if (ret < 0)
 			break;
-		if (io_should_wake(&iowq, false))
+		if (io_should_wake(&iowq))
 			break;
+		if (test_bit(0, &ctx->cq_check_overflow))
+			continue;
 		if (uts) {
 			timeout = schedule_timeout(timeout);
 			if (timeout == 0) {
@@ -7684,12 +7717,12 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
 	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
 	if (!ref_node)
-		return ERR_PTR(-ENOMEM);
+		return NULL;

 	if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
 			    0, GFP_KERNEL)) {
 		kfree(ref_node);
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	}
 	INIT_LIST_HEAD(&ref_node->node);
 	INIT_LIST_HEAD(&ref_node->file_list);
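For context on the return-convention change above: ERR_PTR() encodes a negative errno in the top range of pointer values and callers must decode it with IS_ERR()/PTR_ERR(); since -ENOMEM is the only way alloc_fixed_file_ref_node() can fail, returning plain NULL lets every caller use a simple truthiness check. A small compilable userspace demo of both conventions (the macros mirror include/linux/err.h; alloc_node() is hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

/* old style forced IS_ERR()/PTR_ERR() on callers; new style is a NULL check */
static void *alloc_node(int fail)
{
	if (fail)
		return NULL;	/* was: return ERR_PTR(-ENOMEM); */
	return malloc(16);
}

int main(void)
{
	void *n = alloc_node(1);

	if (!n)
		fprintf(stderr, "alloc_node failed: -ENOMEM\n");
	else
		free(n);
	return 0;
}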
@@ -7783,9 +7816,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	}

 	ref_node = alloc_fixed_file_ref_node(ctx);
-	if (IS_ERR(ref_node)) {
+	if (!ref_node) {
 		io_sqe_files_unregister(ctx);
-		return PTR_ERR(ref_node);
+		return -ENOMEM;
 	}

 	io_sqe_files_set_node(file_data, ref_node);
@@ -7885,8 +7918,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 		return -EINVAL;

 	ref_node = alloc_fixed_file_ref_node(ctx);
-	if (IS_ERR(ref_node))
-		return PTR_ERR(ref_node);
+	if (!ref_node)
+		return -ENOMEM;

 	done = 0;
 	fds = u64_to_user_ptr(up->fds);
@@ -8624,7 +8657,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	smp_rmb();
 	if (!io_sqring_full(ctx))
 		mask |= EPOLLOUT | EPOLLWRNORM;
-	if (io_cqring_events(ctx, false))
+	io_cqring_overflow_flush(ctx, false, NULL, NULL);
+	if (io_cqring_events(ctx))
 		mask |= EPOLLIN | EPOLLRDNORM;

 	return mask;
@@ -8663,7 +8697,7 @@ static void io_ring_exit_work(struct work_struct *work)
 	 * as nobody else will be looking for them.
 	 */
 	do {
-		io_iopoll_try_reap_events(ctx);
+		__io_uring_cancel_task_requests(ctx, NULL);
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 	io_ring_ctx_free(ctx);
 }
@@ -8679,10 +8713,14 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
+
+	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
+		ctx->sqo_dead = 1;
+
 	/* if force is set, the ring is going away. always drop after that */
 	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
-		io_cqring_overflow_flush(ctx, true, NULL, NULL);
+		__io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	mutex_unlock(&ctx->uring_lock);

 	io_kill_timeouts(ctx, NULL, NULL);
@@ -8818,9 +8856,11 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 		enum io_wq_cancel cret;
 		bool ret = false;

-		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-		if (cret != IO_WQ_CANCEL_NOTFOUND)
-			ret = true;
+		if (ctx->io_wq) {
+			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+					       &cancel, true);
+			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+		}

 		/* SQPOLL thread does its own polling */
 		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
@@ -8839,6 +8879,18 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	}
 }

+static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+{
+	WARN_ON_ONCE(ctx->sqo_task != current);
+
+	mutex_lock(&ctx->uring_lock);
+	ctx->sqo_dead = 1;
+	mutex_unlock(&ctx->uring_lock);
+
+	/* make sure callers enter the ring to get error */
+	io_ring_set_wakeup_flag(ctx);
+}
+
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep
@@ -8850,15 +8902,15 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	struct task_struct *task = current;

 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+		/* for SQPOLL only sqo_task has task notes */
+		io_disable_sqo_submit(ctx);
 		task = ctx->sq_data->thread;
 		atomic_inc(&task->io_uring->in_idle);
 		io_sq_thread_park(ctx->sq_data);
 	}

 	io_cancel_defer_files(ctx, task, files);
-	io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 	io_cqring_overflow_flush(ctx, true, task, files);
-	io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));

 	if (!files)
 		__io_uring_cancel_task_requests(ctx, task);
@@ -8931,20 +8983,12 @@ static void io_uring_del_task_file(struct file *file)
 		fput(file);
 }

-/*
- * Drop task note for this file if we're the only ones that hold it after
- * pending fput()
- */
-static void io_uring_attempt_task_drop(struct file *file)
+static void io_uring_remove_task_files(struct io_uring_task *tctx)
 {
-	if (!current->io_uring)
-		return;
-	/*
-	 * fput() is pending, will be 2 if the only other ref is our potential
-	 * task file note. If the task is exiting, drop regardless of count.
-	 */
-	if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
-	    atomic_long_read(&file->f_count) == 2)
+	struct file *file;
+	unsigned long index;
+
+	xa_for_each(&tctx->xa, index, file)
 		io_uring_del_task_file(file);
 }

@@ -8956,16 +9000,12 @@ void __io_uring_files_cancel(struct files_struct *files)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
-
-	xa_for_each(&tctx->xa, index, file) {
-		struct io_ring_ctx *ctx = file->private_data;
-
-		io_uring_cancel_task_requests(ctx, files);
-		if (files)
-			io_uring_del_task_file(file);
-	}
-
+	xa_for_each(&tctx->xa, index, file)
+		io_uring_cancel_task_requests(file->private_data, files);
 	atomic_dec(&tctx->in_idle);
+
+	if (files)
+		io_uring_remove_task_files(tctx);
 }

 static s64 tctx_inflight(struct io_uring_task *tctx)
@@ -9028,11 +9068,39 @@ void __io_uring_task_cancel(void)
 	} while (1);

 	atomic_dec(&tctx->in_idle);
+
+	io_uring_remove_task_files(tctx);
 }

 static int io_uring_flush(struct file *file, void *data)
 {
-	io_uring_attempt_task_drop(file);
+	struct io_uring_task *tctx = current->io_uring;
+	struct io_ring_ctx *ctx = file->private_data;
+
+	if (!tctx)
+		return 0;
+
+	/* we should have cancelled and erased it before PF_EXITING */
+	WARN_ON_ONCE((current->flags & PF_EXITING) &&
+		     xa_load(&tctx->xa, (unsigned long)file));
+
+	/*
+	 * fput() is pending, will be 2 if the only other ref is our potential
+	 * task file note. If the task is exiting, drop regardless of count.
+	 */
+	if (atomic_long_read(&file->f_count) != 2)
+		return 0;
+
+	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		/* there is only one file note, which is owned by sqo_task */
+		WARN_ON_ONCE((ctx->sqo_task == current) ==
+			     !xa_load(&tctx->xa, (unsigned long)file));
+
+		io_disable_sqo_submit(ctx);
+	}
+
+	if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
+		io_uring_del_task_file(file);
 	return 0;
 }

@@ -9106,8 +9174,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 #endif /* !CONFIG_MMU */

-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
+	int ret = 0;
 	DEFINE_WAIT(wait);

 	do {
@@ -9116,6 +9185,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

+		if (unlikely(ctx->sqo_dead)) {
+			ret = -EOWNERDEAD;
+			goto out;
+		}
+
 		if (!io_sqring_full(ctx))
 			break;

@@ -9123,6 +9197,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 	} while (!signal_pending(current));

 	finish_wait(&ctx->sqo_sq_wait, &wait);
+out:
+	return ret;
 }

 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
@@ -9194,17 +9270,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	 */
 	ret = 0;
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
-		if (!list_empty_careful(&ctx->cq_overflow_list)) {
-			bool needs_lock = ctx->flags & IORING_SETUP_IOPOLL;
-
-			io_ring_submit_lock(ctx, needs_lock);
-			io_cqring_overflow_flush(ctx, false, NULL, NULL);
-			io_ring_submit_unlock(ctx, needs_lock);
-		}
+		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+
+		ret = -EOWNERDEAD;
+		if (unlikely(ctx->sqo_dead))
+			goto out;
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
-		if (flags & IORING_ENTER_SQ_WAIT)
-			io_sqpoll_wait_sq(ctx);
+		if (flags & IORING_ENTER_SQ_WAIT) {
+			ret = io_sqpoll_wait_sq(ctx);
+			if (ret)
+				goto out;
+		}
 		submitted = to_submit;
 	} else if (to_submit) {
 		ret = io_uring_add_task_file(ctx, f.file);
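With the sqo_dead checks above, io_uring_enter() on an SQPOLL ring whose creator has died now fails with EOWNERDEAD instead of blocking or submitting into a dead ring. A hedged userspace sketch of what a caller would observe (raw syscall to keep it self-contained; ring_fd is assumed to be an IORING_SETUP_SQPOLL ring created elsewhere):

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter	426	/* x86-64 */
#endif
#define IORING_ENTER_SQ_WAIT	(1U << 2)

static long wait_for_sq_space(int ring_fd)
{
	/* io_uring_enter(fd, to_submit, min_complete, flags, sig, sigsz) */
	long ret = syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			   IORING_ENTER_SQ_WAIT, NULL, 0);

	if (ret < 0 && errno == EOWNERDEAD)
		fprintf(stderr, "SQPOLL owner died; ring is unusable\n");
	return ret;
}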
@@ -9631,6 +9708,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 	return ret;
 err:
+	io_disable_sqo_submit(ctx);
 	io_ring_ctx_wait_and_kill(ctx);
 	return ret;
 }
...