Commit 11c0239a authored by Linus Torvalds

Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "We still have a pending fix for a cancelation issue, but it's still
  being investigated. In the meantime:

   - Dead mm handling fix (Pavel)

   - SQPOLL setup error handling (Pavel)

   - Flush timeout sequence fix (Marcelo)

   - Missing finish_wait() for one exit case"

* tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block:
  io_uring: ensure finish_wait() is always called in __io_uring_task_cancel()
  io_uring: flush timeouts that should already have expired
  io_uring: do sqo disable on install_fd error
  io_uring: fix null-deref in io_disable_sqo_submit
  io_uring: don't take files/mm for a dead task
  io_uring: drop mm and files after task_work_run
parents acda701b a8d13dbc
@@ -354,6 +354,7 @@ struct io_ring_ctx {
 	unsigned		cq_entries;
 	unsigned		cq_mask;
 	atomic_t		cq_timeouts;
+	unsigned		cq_last_tm_flush;
 	unsigned long		cq_check_overflow;
 	struct wait_queue_head	cq_wait;
 	struct fasync_struct	*cq_fasync;
@@ -1106,6 +1107,9 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
+
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1133,6 +1137,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
 	if (current->mm)
 		return 0;
@@ -1634,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	while (!list_empty(&ctx->timeout_list)) {
+	u32 seq;
+
+	if (list_empty(&ctx->timeout_list))
+		return;
+
+	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+	do {
+		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
-		if (req->timeout.target_seq != ctx->cached_cq_tail
-					- atomic_read(&ctx->cq_timeouts))
+
+		/*
+		 * Since seq can easily wrap around over time, subtract
+		 * the last seq at which timeouts were flushed before comparing.
+		 * Assuming not more than 2^31-1 events have happened since,
+		 * these subtractions won't have wrapped, so we can check if
+		 * target is in [last_seq, current_seq] by comparing the two.
+		 */
+		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+		events_got = seq - ctx->cq_last_tm_flush;
+		if (events_got < events_needed)
 			break;
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
-	}
+	} while (!list_empty(&ctx->timeout_list));
+
+	ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -5832,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
+	/* Update the last seq here in case io_flush_timeouts() hasn't.
+	 * This is safe because ->completion_lock is held, and submissions
+	 * and completions are never mixed in the same ->completion_lock section.
+	 */
+	ctx->cq_last_tm_flush = tail;
+
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
@@ -7056,6 +7087,7 @@ static int io_sq_thread(void *data)
 		if (sqt_spin || !time_after(jiffies, timeout)) {
 			io_run_task_work();
+			io_sq_thread_drop_mm_files();
 			cond_resched();
 			if (sqt_spin)
 				timeout = jiffies + sqd->sq_thread_idle;
@@ -7093,6 +7125,7 @@ static int io_sq_thread(void *data)
 	}
 
 	io_run_task_work();
+	io_sq_thread_drop_mm_files();
 	if (cur_css)
 		io_sq_thread_unassociate_blkcg();
@@ -8888,7 +8921,8 @@ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 
 	/* make sure callers enter the ring to get error */
-	io_ring_set_wakeup_flag(ctx);
+	if (ctx->rings)
+		io_ring_set_wakeup_flag(ctx);
 }
 
 /*
@@ -9067,6 +9101,7 @@ void __io_uring_task_cancel(void)
 		finish_wait(&tctx->wait, &wait);
 	} while (1);
+	finish_wait(&tctx->wait, &wait);
 
 	atomic_dec(&tctx->in_idle);
 	io_uring_remove_task_files(tctx);
@@ -9700,6 +9735,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
+		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment