Commit a311c480 authored by Linus Torvalds

Merge git://git.kvack.org/~bcrl/aio-next

Pull aio fix and cleanups from Ben LaHaise:
 "This consists of a couple of code cleanups plus a minor bug fix"

* git://git.kvack.org/~bcrl/aio-next:
  aio: cleanup: flatten kill_ioctx()
  aio: report error from io_destroy() when threads race in io_destroy()
  fs/aio.c: Remove ctx parameter in kiocb_cancel
parents 05064084 fa88b6f8
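
The user-visible part of this pull is the io_destroy() change: when two threads race to destroy the same AIO context, the loser of the race now gets -EINVAL instead of reporting success without actually waiting for outstanding requests. A minimal sketch of what that looks like from userspace (hypothetical test program, not part of this commit; it only assumes the raw io_setup/io_destroy syscalls via syscall(2)):

/* Hypothetical illustration: two threads race to destroy one AIO context.
 * With this fix, exactly one io_destroy() returns 0 (after waiting for
 * in-flight requests); the other fails with EINVAL instead of reporting
 * a second, bogus success.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

static aio_context_t ctx;	/* zero-initialized, as io_setup() requires */

static void *destroyer(void *arg)
{
	long ret = syscall(SYS_io_destroy, ctx);

	(void)arg;
	printf("io_destroy -> %ld (%s)\n", ret,
	       ret ? strerror(errno) : "ok");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	if (syscall(SYS_io_setup, 32, &ctx))	/* create an AIO context */
		return 1;

	pthread_create(&a, NULL, destroyer, NULL);
	pthread_create(&b, NULL, destroyer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Depending on timing the second call may instead find the context id already gone, but either way it now reports EINVAL; before this series both calls returned 0.
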
fs/aio.c
@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
+static int kiocb_cancel(struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
 				       struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req);
+		kiocb_cancel(req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -727,42 +727,42 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 		struct completion *requests_done)
 {
-	if (!atomic_xchg(&ctx->dead, 1)) {
-		struct kioctx_table *table;
-
-		spin_lock(&mm->ioctx_lock);
-		rcu_read_lock();
-		table = rcu_dereference(mm->ioctx_table);
-
-		WARN_ON(ctx != table->table[ctx->id]);
-		table->table[ctx->id] = NULL;
-		rcu_read_unlock();
-		spin_unlock(&mm->ioctx_lock);
-
-		/* percpu_ref_kill() will do the necessary call_rcu() */
-		wake_up_all(&ctx->wait);
-
-		/*
-		 * It'd be more correct to do this in free_ioctx(), after all
-		 * the outstanding kiocbs have finished - but by then io_destroy
-		 * has already returned, so io_setup() could potentially return
-		 * -EAGAIN with no ioctxs actually in use (as far as userspace
-		 * could tell).
-		 */
-		aio_nr_sub(ctx->max_reqs);
-
-		if (ctx->mmap_size)
-			vm_munmap(ctx->mmap_base, ctx->mmap_size);
-
-		ctx->requests_done = requests_done;
-		percpu_ref_kill(&ctx->users);
-	} else {
-		if (requests_done)
-			complete(requests_done);
-	}
+	struct kioctx_table *table;
+
+	if (atomic_xchg(&ctx->dead, 1))
+		return -EINVAL;
+
+	spin_lock(&mm->ioctx_lock);
+	rcu_read_lock();
+	table = rcu_dereference(mm->ioctx_table);
+
+	WARN_ON(ctx != table->table[ctx->id]);
+	table->table[ctx->id] = NULL;
+	rcu_read_unlock();
+	spin_unlock(&mm->ioctx_lock);
+
+	/* percpu_ref_kill() will do the necessary call_rcu() */
+	wake_up_all(&ctx->wait);
+
+	/*
+	 * It'd be more correct to do this in free_ioctx(), after all
+	 * the outstanding kiocbs have finished - but by then io_destroy
+	 * has already returned, so io_setup() could potentially return
+	 * -EAGAIN with no ioctxs actually in use (as far as userspace
+	 * could tell).
+	 */
+	aio_nr_sub(ctx->max_reqs);
+
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+	ctx->requests_done = requests_done;
+	percpu_ref_kill(&ctx->users);
+	return 0;
 }
 
 /* wait_on_sync_kiocb:
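
The hunk above is the "flatten kill_ioctx()" cleanup from the shortlog: the old body nested all of the teardown work under if (!atomic_xchg(&ctx->dead, 1)) with a small else arm, while the new version takes the error exit first and keeps the common path at one indentation level, which is also what lets it report -EINVAL to a racing caller. A generic sketch of that guard-clause style (illustrative names only, not kernel code):

#include <errno.h>
#include <stdatomic.h>

struct obj {
	atomic_int dead;		/* hypothetical stand-in for ctx->dead */
};

/* Before the cleanup: every step nested under the success branch. */
static void teardown_nested(struct obj *o)
{
	if (!atomic_exchange(&o->dead, 1)) {
		/* ... all of the teardown work, one level deeper ... */
	} else {
		/* raced with another destroyer: nothing to do */
	}
}

/* After the cleanup: bail out early, keep the normal path flat, and
 * let the caller see that it lost the race. */
static int teardown_flat(struct obj *o)
{
	if (atomic_exchange(&o->dead, 1))
		return -EINVAL;

	/* ... the same teardown work, no extra nesting ... */
	return 0;
}
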
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 	if (likely(NULL != ioctx)) {
 		struct completion requests_done =
 			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+		int ret;
 
 		/* Pass requests_done to kill_ioctx() where it can be set
 		 * in a thread-safe way. If we try to set it here then we have
 		 * a race condition if two io_destroy() called simultaneously.
 		 */
-		kill_ioctx(current->mm, ioctx, &requests_done);
+		ret = kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
 
 		/* Wait until all IO for the context are done. Otherwise kernel
 		 * keep using user-space buffers even if user thinks the context
 		 * is destroyed.
 		 */
-		wait_for_completion(&requests_done);
+		if (!ret)
+			wait_for_completion(&requests_done);
 
-		return 0;
+		return ret;
 	}
 
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
 	return -EINVAL;
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb);
+		ret = kiocb_cancel(kiocb);
 	else
 		ret = -EINVAL;