Commit 2d68449e authored by Kent Overstreet, committed by Linus Torvalds

aio: kill return value of aio_complete()

Nothing used the return value, and it probably wasn't possible to use it
safely for the locked versions (aio_complete(), aio_put_req()).  Just
kill it.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Acked-by: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 162934de
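To see why the old return value could not be used safely, here is a minimal, hypothetical caller sketch (not code from the tree), written against the pre-patch int-returning prototypes:

#include <linux/aio.h>

/*
 * Hypothetical caller of the OLD API: aio_put_req() computed "was this the
 * last put?" under ctx->ctx_lock, but the caller only saw the answer after
 * the lock had been dropped.
 */
static void hypothetical_drop_ref(struct kiocb *req)
{
        if (aio_put_req(req)) {
                /* last put: req has already been freed, nothing left to do */
        } else {
                /*
                 * "still in use" was only true while the lock was held inside
                 * aio_put_req(); another CPU may perform the final put right
                 * after the unlock, so touching req here would risk a
                 * use-after-free.  Either way the value cannot be acted on.
                 */
        }
}

Since neither branch can safely act on the answer, returning it only invites misuse; making the functions void removes the temptation.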
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -531,7 +531,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 /* __aio_put_req
  * Returns true if this put was the last user of the request.
  */
-static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+static void __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
         dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
                 req, atomic_long_read(&req->ki_filp->f_count));
@@ -541,7 +541,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
         req->ki_users--;
         BUG_ON(req->ki_users < 0);
         if (likely(req->ki_users))
-                return 0;
+                return;
         list_del(&req->ki_list);        /* remove from active_reqs */
         req->ki_cancel = NULL;
         req->ki_retry = NULL;
@@ -549,21 +549,18 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
         fput(req->ki_filp);
         req->ki_filp = NULL;
         really_put_req(ctx, req);
-        return 1;
 }
 
 /* aio_put_req
  * Returns true if this put was the last user of the kiocb,
  * false if the request is still in use.
  */
-int aio_put_req(struct kiocb *req)
+void aio_put_req(struct kiocb *req)
 {
         struct kioctx *ctx = req->ki_ctx;
-        int ret;
 
         spin_lock_irq(&ctx->ctx_lock);
-        ret = __aio_put_req(ctx, req);
+        __aio_put_req(ctx, req);
         spin_unlock_irq(&ctx->ctx_lock);
-        return ret;
 }
 EXPORT_SYMBOL(aio_put_req);
@@ -593,10 +590,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 
 /* aio_complete
  * Called when the io request on the given iocb is complete.
- * Returns true if this is the last user of the request.  The
- * only other user of the request can be the cancellation code.
  */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+void aio_complete(struct kiocb *iocb, long res, long res2)
 {
         struct kioctx *ctx = iocb->ki_ctx;
         struct aio_ring_info *info;
@@ -604,7 +599,6 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
         struct io_event *event;
         unsigned long flags;
         unsigned long tail;
-        int ret;
 
         /*
          * Special case handling for sync iocbs:
@@ -618,7 +612,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
                 iocb->ki_user_data = res;
                 iocb->ki_users = 0;
                 wake_up_process(iocb->ki_obj.tsk);
-                return 1;
+                return;
         }
 
         info = &ctx->ring_info;
@@ -677,7 +671,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 put_rq:
         /* everything turned out well, dispose of the aiocb. */
-        ret = __aio_put_req(ctx, iocb);
+        __aio_put_req(ctx, iocb);
 
         /*
          * We have to order our ring_info tail store above and test
@@ -691,7 +685,6 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
                 wake_up(&ctx->wait);
 
         spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-        return ret;
 }
 EXPORT_SYMBOL(aio_complete);
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -167,16 +167,16 @@ struct kioctx {
 /* prototypes */
 #ifdef CONFIG_AIO
 extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern int aio_put_req(struct kiocb *iocb);
-extern int aio_complete(struct kiocb *iocb, long res, long res2);
+extern void aio_put_req(struct kiocb *iocb);
+extern void aio_complete(struct kiocb *iocb, long res, long res2);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
                 struct iocb __user *__user *iocbpp, bool compat);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline int aio_put_req(struct kiocb *iocb) { return 0; }
-static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
+static inline void aio_put_req(struct kiocb *iocb) { }
+static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
 struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
 static inline long do_io_submit(aio_context_t ctx_id, long nr,
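With the void prototypes above, a completion path simply reports the result and stops. A minimal, hypothetical sketch of the caller side (the function name is illustrative, not from the tree):

#include <linux/aio.h>

/* Hypothetical completion hook built on the new void API. */
static void hypothetical_io_done(struct kiocb *iocb, long nr_bytes)
{
        /*
         * Report the result; aio_complete() queues the io_event and drops
         * the request's reference internally (see the put_rq: label above),
         * so there is no status to check and no further use of iocb here.
         */
        aio_complete(iocb, nr_bytes, 0);
}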