Commit 97e7cdf1 authored by Joe Thornber, committed by Mike Snitzer

dm io: simplify dec_count and sync_io

Remove the io struct from the stack in sync_io() and allocate it from
the mempool instead, as is done in async_io().

dec_count() now always calls a callback function and always frees the io
struct back to the mempool (so sync_io and async_io share this pattern).
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 44fa816b
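
The commit's core move is to make the synchronous path a client of the
asynchronous one: sync_io() now allocates the io struct from the mempool,
points its callback at a helper that records the error bits in a small
on-stack context and signals it, and then waits. Before the patch itself,
here is a minimal userspace C sketch of that shape. All names here
(async_op, sync_ctx, sync_complete) are hypothetical, and a POSIX
semaphore stands in for the kernel's struct completion; compile with
cc sketch.c -lpthread.

/*
 * Hypothetical sketch of the sync-on-top-of-async pattern this commit
 * adopts; not dm-io code.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (*notify_fn)(unsigned long error, void *context);

struct request {                 /* heap-allocated, like io from the mempool */
        notify_fn callback;
        void *context;
};

/* The async layer always frees the request and always fires the callback,
 * mirroring what complete_io() now does for both sync and async users. */
static void *worker(void *arg)
{
        struct request *req = arg;
        notify_fn fn = req->callback;
        void *context = req->context;

        free(req);               /* free before calling back, as complete_io() does */
        fn(0, context);          /* report "no error" */
        return NULL;
}

static void async_op(notify_fn fn, void *context)
{
        struct request *req = malloc(sizeof(*req));
        pthread_t t;

        req->callback = fn;
        req->context = context;
        pthread_create(&t, NULL, worker, req);
        pthread_detach(t);
}

/* Sync wrapper: a small on-stack context plus a callback that signals it,
 * the userspace analogue of struct sync_io + sync_io_complete(). */
struct sync_ctx {
        unsigned long error_bits;
        sem_t done;
};

static void sync_complete(unsigned long error, void *context)
{
        struct sync_ctx *ctx = context;

        ctx->error_bits = error;
        sem_post(&ctx->done);    /* like complete(&sio->wait) */
}

static int sync_op(void)
{
        struct sync_ctx ctx;

        sem_init(&ctx.done, 0, 0);
        async_op(sync_complete, &ctx);
        sem_wait(&ctx.done);     /* like wait_for_completion_io(&sio.wait) */
        sem_destroy(&ctx.done);
        return ctx.error_bits ? -1 : 0;
}

int main(void)
{
        printf("sync_op() -> %d\n", sync_op());
        return 0;
}

The on-stack context is safe here for the same reason struct sync_io is
safe in the patch: sync_op() cannot return until the callback has signalled
it, so the async side never touches a dead stack frame.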
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -33,7 +33,6 @@ struct dm_io_client {
 struct io {
         unsigned long error_bits;
         atomic_t count;
-        struct completion *wait;
         struct dm_io_client *client;
         io_notify_fn callback;
         void *context;
@@ -112,28 +111,27 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
  * We need an io object to keep track of the number of bios that
  * have been dispatched for a particular io.
  *---------------------------------------------------------------*/
-static void dec_count(struct io *io, unsigned int region, int error)
+static void complete_io(struct io *io)
 {
-        if (error)
-                set_bit(region, &io->error_bits);
+        unsigned long error_bits = io->error_bits;
+        io_notify_fn fn = io->callback;
+        void *context = io->context;
 
-        if (atomic_dec_and_test(&io->count)) {
-                if (io->vma_invalidate_size)
-                        invalidate_kernel_vmap_range(io->vma_invalidate_address,
-                                                     io->vma_invalidate_size);
+        if (io->vma_invalidate_size)
+                invalidate_kernel_vmap_range(io->vma_invalidate_address,
+                                             io->vma_invalidate_size);
 
-                if (io->wait)
-                        complete(io->wait);
+        mempool_free(io, io->client->pool);
+        fn(error_bits, context);
+}
 
-                else {
-                        unsigned long r = io->error_bits;
-                        io_notify_fn fn = io->callback;
-                        void *context = io->context;
-
-                        mempool_free(io, io->client->pool);
-                        fn(r, context);
-                }
-        }
+static void dec_count(struct io *io, unsigned int region, int error)
+{
+        if (error)
+                set_bit(region, &io->error_bits);
+
+        if (atomic_dec_and_test(&io->count))
+                complete_io(io);
 }
 
 static void endio(struct bio *bio, int error)
@@ -376,41 +374,51 @@ static void dispatch_io(int rw, unsigned int num_regions,
         dec_count(io, 0, 0);
 }
 
+struct sync_io {
+        unsigned long error_bits;
+        struct completion wait;
+};
+
+static void sync_io_complete(unsigned long error, void *context)
+{
+        struct sync_io *sio = context;
+
+        sio->error_bits = error;
+        complete(&sio->wait);
+}
+
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    unsigned long *error_bits)
 {
-        /*
-         * gcc <= 4.3 can't do the alignment for stack variables, so we must
-         * align it on our own.
-         * volatile prevents the optimizer from removing or reusing
-         * "io_" field from the stack frame (allowed in ANSI C).
-         */
-        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
-        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
-        DECLARE_COMPLETION_ONSTACK(wait);
+        struct io *io;
+        struct sync_io sio;
 
         if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                 WARN_ON(1);
                 return -EIO;
         }
 
+        init_completion(&sio.wait);
+
+        io = mempool_alloc(client->pool, GFP_NOIO);
         io->error_bits = 0;
         atomic_set(&io->count, 1); /* see dispatch_io() */
-        io->wait = &wait;
         io->client = client;
+        io->callback = sync_io_complete;
+        io->context = &sio;
 
         io->vma_invalidate_address = dp->vma_invalidate_address;
         io->vma_invalidate_size = dp->vma_invalidate_size;
 
         dispatch_io(rw, num_regions, where, dp, io, 1);
 
-        wait_for_completion_io(&wait);
+        wait_for_completion_io(&sio.wait);
 
         if (error_bits)
-                *error_bits = io->error_bits;
+                *error_bits = sio.error_bits;
 
-        return io->error_bits ? -EIO : 0;
+        return sio.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
@@ -428,7 +436,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
         io = mempool_alloc(client->pool, GFP_NOIO);
         io->error_bits = 0;
         atomic_set(&io->count, 1); /* see dispatch_io() */
-        io->wait = NULL;
         io->client = client;
         io->callback = fn;
         io->context = context;
@@ -481,9 +488,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
- * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
+ * If you fail to do one of these, the IO will be submitted to the disk after
+ * q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
           struct dm_io_region *where, unsigned long *sync_error_bits)
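
Both paths rely on the counting idiom flagged by the /* see dispatch_io() */
comment: the count starts at 1 because the dispatcher itself holds a
reference while sub-IOs are being submitted, so a fast sub-IO that completes
mid-loop cannot drop the count to zero and free the io under the
dispatcher's feet. The callback fires only once every bio has completed and
dispatch_io() has dropped its own reference via dec_count(io, 0, 0). A
hypothetical userspace sketch of this idiom, with C11 atomics and pthreads
standing in for atomic_t and bio completion (none of these names are from
dm-io); compile with cc -std=c11 sketch.c -lpthread:

/*
 * Illustrative sketch of the dispatch_io()/dec_count() refcount pattern,
 * not dm-io code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io {
        atomic_int count;
        atomic_ulong error_bits;
        void (*callback)(unsigned long error_bits, void *context);
        void *context;
};

static void dec_count(struct io *io, unsigned region, int error)
{
        if (error)
                atomic_fetch_or(&io->error_bits, 1UL << region); /* like set_bit() */

        /* Last reference down: fire the callback and free, like complete_io(). */
        if (atomic_fetch_sub(&io->count, 1) == 1) {
                io->callback(atomic_load(&io->error_bits), io->context);
                free(io);
        }
}

static void *sub_io(void *arg)
{
        dec_count(arg, 0, 0);            /* each sub-IO completes, no error */
        return NULL;
}

static void dispatch_io(struct io *io, int num_regions)
{
        pthread_t t;

        for (int i = 0; i < num_regions; i++) {
                atomic_fetch_add(&io->count, 1);  /* one ref per sub-IO */
                pthread_create(&t, NULL, sub_io, io);
                pthread_detach(t);
        }
        dec_count(io, 0, 0);             /* drop the dispatcher's initial ref */
}

static void done(unsigned long error_bits, void *context)
{
        (void)context;
        printf("all sub-IOs complete, error_bits=%lx\n", error_bits);
}

int main(void)
{
        struct io *io = malloc(sizeof(*io));

        atomic_init(&io->count, 1);      /* see dispatch_io() in the patch */
        atomic_init(&io->error_bits, 0);
        io->callback = done;
        io->context = NULL;
        dispatch_io(io, 4);
        pthread_exit(NULL);              /* let detached threads finish */
}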