Commit 263782c1 authored by Benjamin LaHaise's avatar Benjamin LaHaise

aio: protect reqs_available updates from changes in interrupt handlers

As of commit f8567a38 it is now possible to
have put_reqs_available() called from irq context.  While put_reqs_available()
is per cpu, it did not protect itself from interrupts on the same CPU.  This
led to aio_complete() corrupting the available io requests count when run
under heavy O_DIRECT workloads as reported by Robert Elliott.  Fix this by
disabling irq updates around the per cpu batch updates of reqs_available.

Many thanks to Robert and folks for testing and tracking this down.
Reported-by: Robert Elliott <Elliott@hp.com>
Tested-by: Robert Elliott <Elliott@hp.com>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Jens Axboe <axboe@kernel.dk>, Christoph Hellwig <hch@infradead.org>
Cc: stable@vger.kernel.org
parent 1795cd9b
...@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm) ...@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
static void put_reqs_available(struct kioctx *ctx, unsigned nr) static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{ {
struct kioctx_cpu *kcpu; struct kioctx_cpu *kcpu;
unsigned long flags;
preempt_disable(); preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu); kcpu = this_cpu_ptr(ctx->cpu);
local_irq_save(flags);
kcpu->reqs_available += nr; kcpu->reqs_available += nr;
while (kcpu->reqs_available >= ctx->req_batch * 2) { while (kcpu->reqs_available >= ctx->req_batch * 2) {
kcpu->reqs_available -= ctx->req_batch; kcpu->reqs_available -= ctx->req_batch;
atomic_add(ctx->req_batch, &ctx->reqs_available); atomic_add(ctx->req_batch, &ctx->reqs_available);
} }
local_irq_restore(flags);
preempt_enable(); preempt_enable();
} }
...@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx) ...@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
{ {
struct kioctx_cpu *kcpu; struct kioctx_cpu *kcpu;
bool ret = false; bool ret = false;
unsigned long flags;
preempt_disable(); preempt_disable();
kcpu = this_cpu_ptr(ctx->cpu); kcpu = this_cpu_ptr(ctx->cpu);
local_irq_save(flags);
if (!kcpu->reqs_available) { if (!kcpu->reqs_available) {
int old, avail = atomic_read(&ctx->reqs_available); int old, avail = atomic_read(&ctx->reqs_available);
...@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx) ...@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
ret = true; ret = true;
kcpu->reqs_available--; kcpu->reqs_available--;
out: out:
local_irq_restore(flags);
preempt_enable(); preempt_enable();
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment