Commit ef7b1a0e authored by Linus Torvalds

Merge tag 'io_uring-5.11-2021-01-24' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Still need a final cancelation fix that isn't quite done done,
  expected in the next day or two. That said, this contains:

   - Wakeup fix for IOPOLL requests

   - SQPOLL split close op handling fix

   - Ensure that any use of io_uring fd itself is marked as inflight

   - Short non-regular file read fix (Pavel)

   - Fix up bad false positive warning (Pavel)

   - SQPOLL fixes (Pavel)

   - In-flight removal fix (Pavel)"

* tag 'io_uring-5.11-2021-01-24' of git://git.kernel.dk/linux-block:
  io_uring: account io_uring internal files as REQ_F_INFLIGHT
  io_uring: fix sleeping under spin in __io_clean_op
  io_uring: fix short read retries for non-reg files
  io_uring: fix SQPOLL IORING_OP_CLOSE cancelation state
  io_uring: fix skipping disabling sqo on exec
  io_uring: fix uring_flush in exit_files() warning
  io_uring: fix false positive sqo warning on flush
  io_uring: iopoll requests should also wake task ->in_idle state
parents a692a610 02a13674
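
Editorial note: all hunks below are against fs/io_uring.c. Several of the fixes in the summary (the sqo warnings, skipping sqo disable on exec, the IORING_OP_CLOSE cancelation state) only matter when the ring was created in SQPOLL mode, where a kernel thread drains the submission queue. Purely for orientation, here is a minimal userspace sketch of that setup, assuming liburing is available; it is illustrative only, not part of this diff, and on this kernel series SQPOLL typically still needs elevated privileges (CAP_SYS_NICE).

```c
/* Illustrative sketch only (not part of the kernel diff): set up a ring in
 * the SQPOLL mode that several of these fixes concern. Assumes liburing. */
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel thread polls the SQ */
	p.sq_thread_idle = 2000;	/* ms of idle before the thread sleeps */

	/* typically needs CAP_SYS_NICE or root on this kernel series */
	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	/* ... prepare and submit SQEs; the sq thread picks them up ... */
	io_uring_queue_exit(&ring);
	return 0;
}
```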
@@ -1025,6 +1025,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);

 static struct kmem_cache *req_cachep;
@@ -1048,8 +1049,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
 static inline void io_clean_op(struct io_kiocb *req)
 {
-	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
-			  REQ_F_INFLIGHT))
+	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
 		__io_clean_op(req);
 }
@@ -1075,8 +1075,11 @@ static bool io_match_task(struct io_kiocb *head,
 		return true;

 	io_for_each_link(req, head) {
-		if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-		    (req->work.flags & IO_WQ_WORK_FILES) &&
+		if (!(req->flags & REQ_F_WORK_INITIALIZED))
+			continue;
+		if (req->file && req->file->f_op == &io_uring_fops)
+			return true;
+		if ((req->work.flags & IO_WQ_WORK_FILES) &&
 		    req->work.identity->files == files)
 			return true;
 	}
@@ -1394,6 +1397,8 @@ static void io_req_clean_work(struct io_kiocb *req)
 		free_fs_struct(fs);
 		req->work.flags &= ~IO_WQ_WORK_FS;
 	}
+	if (req->flags & REQ_F_INFLIGHT)
+		io_req_drop_files(req);

 	io_put_identity(req->task->io_uring, req);
 }
@@ -1503,11 +1508,14 @@ static bool io_grab_identity(struct io_kiocb *req)
 			return false;
 		atomic_inc(&id->files->count);
 		get_nsproxy(id->nsproxy);
-		req->flags |= REQ_F_INFLIGHT;

-		spin_lock_irq(&ctx->inflight_lock);
-		list_add(&req->inflight_entry, &ctx->inflight_list);
-		spin_unlock_irq(&ctx->inflight_lock);
+		if (!(req->flags & REQ_F_INFLIGHT)) {
+			req->flags |= REQ_F_INFLIGHT;
+
+			spin_lock_irq(&ctx->inflight_lock);
+			list_add(&req->inflight_entry, &ctx->inflight_list);
+			spin_unlock_irq(&ctx->inflight_lock);
+		}
 		req->work.flags |= IO_WQ_WORK_FILES;
 	}
 	if (!(req->work.flags & IO_WQ_WORK_MM) &&
@@ -2270,6 +2278,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 		struct io_uring_task *tctx = rb->task->io_uring;

 		percpu_counter_sub(&tctx->inflight, rb->task_refs);
+		if (atomic_read(&tctx->in_idle))
+			wake_up(&tctx->wait);
 		put_task_struct_many(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
@@ -2288,6 +2298,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 			struct io_uring_task *tctx = rb->task->io_uring;

 			percpu_counter_sub(&tctx->inflight, rb->task_refs);
+			if (atomic_read(&tctx->in_idle))
+				wake_up(&tctx->wait);
 			put_task_struct_many(rb->task, rb->task_refs);
 		}
 		rb->task = req->task;
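
Editorial note: the two wakeup hunks above make the batched request-free paths notify a task waiting in ->in_idle. IOPOLL completions are freed through these paths, so without the wakeup a task waiting for its polled requests to drain on exit might not be woken ("iopoll requests should also wake task ->in_idle state" in the summary). As a hedged illustration of the mode involved, not part of the diff, an IOPOLL ring looks roughly like this with liburing; the path and 512-byte alignment are assumptions, and polled I/O also needs O_DIRECT and a device or filesystem that supports it.

```c
/* Illustrative sketch only: a ring in IOPOLL mode, the configuration the
 * ->in_idle wakeup fix above is about. Path and alignment are assumptions. */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}
	fd = open("/mnt/test/file", O_RDONLY | O_DIRECT);	/* hypothetical path */
	if (fd < 0 || posix_memalign(&buf, 512, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* completions are reaped by actively polling, not by interrupts */
	if (!io_uring_wait_cqe(&ring, &cqe))
		io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
```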
@@ -3548,7 +3560,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	/* read it all, or we did blocking attempt. no retry. */
 	if (!iov_iter_count(iter) || !force_nonblock ||
-	    (req->file->f_flags & O_NONBLOCK))
+	    (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
 		goto done;

 	io_size -= ret;
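
Editorial note: the io_read() hunk above stops retrying short reads unless the target is a regular file (REQ_F_ISREG). For pipes and sockets a short read is an expected, final result, so retrying could wait for data that may never arrive. A hedged userspace sketch of that case, assuming liburing; illustrative only, not part of the diff.

```c
/* Illustrative sketch only: a pipe read that legitimately completes short,
 * the case the hunk above stops retrying. Assumes liburing. */
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fds[2];

	if (pipe(fds) || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* only 5 bytes are available, but we ask for 4096 */
	write(fds[1], "hello", 5);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		/* expect res == 5: a short read is the final answer for a pipe */
		printf("read %d bytes\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
```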
@@ -4468,7 +4480,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	 * io_wq_work.flags, so initialize io_wq_work firstly.
	 */
 	io_req_init_async(req);
-	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -4501,6 +4512,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
 	/* if the file has a flush method, be safe and punt to async */
 	if (close->put_file->f_op->flush && force_nonblock) {
+		/* not safe to cancel at this point */
+		req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 		/* was never set, but play safe */
 		req->flags &= ~REQ_F_NOWAIT;
 		/* avoid grabbing files - we don't need the files */
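
Editorial note: the two close hunks move IO_WQ_WORK_NO_CANCEL from prep time to the point where io_close() actually punts to async work with the file half closed, so a close request that never got that far can still be canceled (the SQPOLL IORING_OP_CLOSE cancelation fix in the summary). For orientation only, an async close submitted through io_uring looks roughly like this with liburing; the sketch is illustrative, not part of the diff.

```c
/* Illustrative sketch only: closing a descriptor via IORING_OP_CLOSE,
 * the operation the IO_WQ_WORK_NO_CANCEL change above concerns. */
#include <liburing.h>
#include <fcntl.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;
	fd = open("/dev/null", O_RDONLY);
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_close(sqe, fd);		/* IORING_OP_CLOSE */
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe))
		io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
```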
@@ -6157,8 +6170,10 @@ static void io_req_drop_files(struct io_kiocb *req)
 	struct io_uring_task *tctx = req->task->io_uring;
 	unsigned long flags;

-	put_files_struct(req->work.identity->files);
-	put_nsproxy(req->work.identity->nsproxy);
+	if (req->work.flags & IO_WQ_WORK_FILES) {
+		put_files_struct(req->work.identity->files);
+		put_nsproxy(req->work.identity->nsproxy);
+	}
 	spin_lock_irqsave(&ctx->inflight_lock, flags);
 	list_del(&req->inflight_entry);
 	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
@@ -6225,9 +6240,6 @@ static void __io_clean_op(struct io_kiocb *req)
 		}
 		req->flags &= ~REQ_F_NEED_CLEANUP;
 	}
-
-	if (req->flags & REQ_F_INFLIGHT)
-		io_req_drop_files(req);
 }

 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6446,6 +6458,15 @@ static struct file *io_file_get(struct io_submit_state *state,
 		file = __io_file_get(state, fd);
 	}

+	if (file && file->f_op == &io_uring_fops) {
+		io_req_init_async(req);
+		req->flags |= REQ_F_INFLIGHT;
+
+		spin_lock_irq(&ctx->inflight_lock);
+		list_add(&req->inflight_entry, &ctx->inflight_list);
+		spin_unlock_irq(&ctx->inflight_lock);
+	}
+
 	return file;
 }
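
Editorial note: the io_file_get() hunk makes any request whose target file is itself an io_uring instance count as REQ_F_INFLIGHT, so task/files cancelation (see the io_match_task() hunk earlier) can find and cancel it on exit. A hedged sketch of such a request, a poll on the ring's own descriptor, assuming liburing; illustrative only.

```c
/* Illustrative sketch only: a request whose target file is the io_uring fd
 * itself, the case io_file_get() now marks REQ_F_INFLIGHT. */
#include <liburing.h>
#include <poll.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* poll the ring's own file descriptor for readable completions */
	io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
	io_uring_submit(&ring);

	/* exiting with this request pending is what the inflight tracking
	 * above lets the kernel cancel cleanly */
	io_uring_queue_exit(&ring);
	return 0;
}
```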
@@ -8856,8 +8877,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 		spin_lock_irq(&ctx->inflight_lock);
 		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-			if (req->task != task ||
-			    req->work.identity->files != files)
+			if (!io_match_task(req, task, files))
 				continue;
 			found = true;
 			break;
@@ -8874,6 +8894,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
 		io_poll_remove_all(ctx, task, files);
 		io_kill_timeouts(ctx, task, files);
+		io_cqring_overflow_flush(ctx, true, task, files);
 		/* cancellations _may_ trigger task work */
 		io_run_task_work();
 		schedule();
@@ -8914,8 +8935,6 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 {
-	WARN_ON_ONCE(ctx->sqo_task != current);
-
 	mutex_lock(&ctx->uring_lock);
 	ctx->sqo_dead = 1;
 	mutex_unlock(&ctx->uring_lock);
@@ -8937,6 +8956,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
 		/* for SQPOLL only sqo_task has task notes */
+		WARN_ON_ONCE(ctx->sqo_task != current);
 		io_disable_sqo_submit(ctx);
 		task = ctx->sq_data->thread;
 		atomic_inc(&task->io_uring->in_idle);
@@ -9082,6 +9102,10 @@ void __io_uring_task_cancel(void)
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);

+	/* trigger io_disable_sqo_submit() */
+	if (tctx->sqpoll)
+		__io_uring_files_cancel(NULL);
+
 	do {
 		/* read completions before cancelations */
 		inflight = tctx_inflight(tctx);
@@ -9128,7 +9152,10 @@ static int io_uring_flush(struct file *file, void *data)
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		/* there is only one file note, which is owned by sqo_task */
-		WARN_ON_ONCE((ctx->sqo_task == current) ==
+		WARN_ON_ONCE(ctx->sqo_task != current &&
+			     xa_load(&tctx->xa, (unsigned long)file));
+		/* sqo_dead check is for when this happens after cancellation */
+		WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
 			     !xa_load(&tctx->xa, (unsigned long)file));
 		io_disable_sqo_submit(ctx);