Commit 5f015204 authored by Jun'ichi Nomura, committed by Alasdair G Kergon

dm: merge io_pool and tio_pool

This patch merges io_pool and tio_pool into io_pool and cleans up
related functions.

Though device-mapper used to have 2 pools of objects for each dm device,
the use of bioset front_pad for per-bio data has shrunk the number of
pools to 1 for both bio-based and request-based device types.
(See c0820cf5 "dm: introduce per_bio_data" and
 94818742 "dm: Use bioset's front_pad for dm_rq_clone_bio_info")

So dm no longer has to maintain 2 different pointers.

No functional changes.
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 23e5083b
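
For readers unfamiliar with the front_pad mechanism the commit message refers to, the sketch below is an illustrative aside, not part of this commit: it shows how per-bio data carried in a bioset's front_pad is typically laid out and recovered. The struct and helper names here are hypothetical stand-ins; the real accessor introduced by c0820cf5 (dm_per_bio_data() in include/linux/device-mapper.h) does essentially the same pointer arithmetic against struct dm_target_io.

#include <linux/bio.h>
#include <linux/kernel.h>

/*
 * Illustrative sketch only (not from this commit): a clone bio allocated
 * from a bioset with front_pad has extra space reserved *in front of* the
 * bio, so a single allocation looks like:
 *
 *   [ per-bio data ][ tio fields up to 'clone' ][ struct bio clone ... ]
 *    \_______________ front_pad _______________/
 *
 * 'tio_like' is a hypothetical stand-in for struct dm_target_io.
 */
struct tio_like {
	void *target;			/* illustrative bookkeeping field */
	struct bio clone;		/* must be the last member */
};

/* Walk back from the clone bio to the per-bio data in the front_pad. */
static inline void *per_bio_data_of(struct bio *clone, size_t data_size)
{
	return (char *)container_of(clone, struct tio_like, clone) - data_size;
}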
@@ -163,7 +163,6 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
-	mempool_t *tio_pool;
 
 	struct bio_set *bs;
 
@@ -197,7 +196,6 @@ struct mapped_device {
  */
 struct dm_md_mempools {
 	mempool_t *io_pool;
-	mempool_t *tio_pool;
 
 	struct bio_set *bs;
 };
@@ -435,12 +433,12 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					     gfp_t gfp_mask)
 {
-	return mempool_alloc(md->tio_pool, gfp_mask);
+	return mempool_alloc(md->io_pool, gfp_mask);
 }
 
 static void free_rq_tio(struct dm_rq_target_io *tio)
 {
-	mempool_free(tio, tio->md->tio_pool);
+	mempool_free(tio, tio->md->io_pool);
 }
 
 static int md_in_flight(struct mapped_device *md)
@@ -1949,8 +1947,6 @@ static void free_dev(struct mapped_device *md)
 	unlock_fs(md);
 	bdput(md->bdev);
 	destroy_workqueue(md->wq);
-	if (md->tio_pool)
-		mempool_destroy(md->tio_pool);
 	if (md->io_pool)
 		mempool_destroy(md->io_pool);
 	if (md->bs)
@@ -1973,7 +1969,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	if (md->bs) {
+	if (md->io_pool && md->bs) {
 		/* The md already has necessary mempools. */
 		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
 			/*
@@ -1984,7 +1980,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 			md->bs = p->bs;
 			p->bs = NULL;
 		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
-			BUG_ON(!md->tio_pool);
 			/*
 			 * There's no need to reload with request-based dm
 			 * because the size of front_pad doesn't change.
@@ -1997,12 +1992,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+	BUG_ON(!p || md->io_pool || md->bs);
 
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
-	md->tio_pool = p->tio_pool;
-	p->tio_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
 
@@ -2759,54 +2752,42 @@ EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
 {
-	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
-	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
+	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+	struct kmem_cache *cachep;
+	unsigned int pool_size;
+	unsigned int front_pad;
 
 	if (!pools)
 		return NULL;
 
-	per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
-
-	pools->io_pool = NULL;
 	if (type == DM_TYPE_BIO_BASED) {
-		pools->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
-		if (!pools->io_pool)
-			goto free_pools_and_out;
-	}
-
-	pools->tio_pool = NULL;
-	if (type == DM_TYPE_REQUEST_BASED) {
-		pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
-		if (!pools->tio_pool)
-			goto free_io_pool_and_out;
-	}
+		cachep = _io_cache;
+		pool_size = 16;
+		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+	} else if (type == DM_TYPE_REQUEST_BASED) {
+		cachep = _rq_tio_cache;
+		pool_size = MIN_IOS;
+		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+		/* per_bio_data_size is not used. See __bind_mempools(). */
+		WARN_ON(per_bio_data_size != 0);
+	} else
+		goto out;
+
+	pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+	if (!pools->io_pool)
+		goto out;
 
-	pools->bs = (type == DM_TYPE_BIO_BASED) ?
-		bioset_create(pool_size,
-			      per_bio_data_size + offsetof(struct dm_target_io, clone)) :
-		bioset_create(pool_size,
-			      offsetof(struct dm_rq_clone_bio_info, clone));
+	pools->bs = bioset_create(pool_size, front_pad);
 	if (!pools->bs)
-		goto free_tio_pool_and_out;
+		goto out;
 
 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
-		goto free_bioset_and_out;
+		goto out;
 
 	return pools;
 
-free_bioset_and_out:
-	bioset_free(pools->bs);
-
-free_tio_pool_and_out:
-	if (pools->tio_pool)
-		mempool_destroy(pools->tio_pool);
-
-free_io_pool_and_out:
-	if (pools->io_pool)
-		mempool_destroy(pools->io_pool);
-
-free_pools_and_out:
-	kfree(pools);
+out:
+	dm_free_md_mempools(pools);
 
 	return NULL;
 }
@@ -2819,9 +2800,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 
 	if (pools->io_pool)
 		mempool_destroy(pools->io_pool);
 
-	if (pools->tio_pool)
-		mempool_destroy(pools->tio_pool);
-
 	if (pools->bs)
 		bioset_free(pools->bs);
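
A side note on the error handling in the rewritten dm_alloc_md_mempools() above: because the structure is now kzalloc'd, a single out: label can simply hand everything to the free routine, which skips members that were never allocated. Below is a minimal, hedged sketch of that idiom; the pools_like names are hypothetical stand-ins for the dm types, not dm API.

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct pools_like {			/* hypothetical stand-in for struct dm_md_mempools */
	mempool_t *io_pool;
	struct bio_set *bs;
};

static void pools_like_free(struct pools_like *p)
{
	if (!p)
		return;
	if (p->io_pool)
		mempool_destroy(p->io_pool);
	if (p->bs)
		bioset_free(p->bs);
	kfree(p);
}

static struct pools_like *pools_like_alloc(struct kmem_cache *cachep,
					   unsigned int pool_size,
					   unsigned int front_pad)
{
	/* kzalloc: every member starts out NULL, so partial failures free cleanly */
	struct pools_like *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	p->io_pool = mempool_create_slab_pool(pool_size, cachep);
	if (!p->io_pool)
		goto out;

	p->bs = bioset_create(pool_size, front_pad);
	if (!p->bs)
		goto out;

	return p;

out:	/* single cleanup label; the free helper tolerates missing members */
	pools_like_free(p);
	return NULL;
}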