Commit 23e5083b authored by Jun'ichi Nomura, committed by Alasdair G Kergon

dm: remove unused _rq_bio_info_cache

Remove _rq_bio_info_cache, which is no longer used.
No functional changes.
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 87eb5b21
...@@ -205,12 +205,6 @@ struct dm_md_mempools { ...@@ -205,12 +205,6 @@ struct dm_md_mempools {
static struct kmem_cache *_io_cache; static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache; static struct kmem_cache *_rq_tio_cache;
/*
* Unused now, and needs to be deleted. But since io_pool is overloaded and it's
* still used for _io_cache, I'm leaving this for a later cleanup
*/
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void) static int __init local_init(void)
{ {
int r = -ENOMEM; int r = -ENOMEM;
...@@ -224,13 +218,9 @@ static int __init local_init(void) ...@@ -224,13 +218,9 @@ static int __init local_init(void)
if (!_rq_tio_cache) if (!_rq_tio_cache)
goto out_free_io_cache; goto out_free_io_cache;
_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
if (!_rq_bio_info_cache)
goto out_free_rq_tio_cache;
r = dm_uevent_init(); r = dm_uevent_init();
if (r) if (r)
goto out_free_rq_bio_info_cache; goto out_free_rq_tio_cache;
_major = major; _major = major;
r = register_blkdev(_major, _name); r = register_blkdev(_major, _name);
...@@ -244,8 +234,6 @@ static int __init local_init(void) ...@@ -244,8 +234,6 @@ static int __init local_init(void)
out_uevent_exit: out_uevent_exit:
dm_uevent_exit(); dm_uevent_exit();
out_free_rq_bio_info_cache:
kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache: out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache: out_free_io_cache:
...@@ -256,7 +244,6 @@ static int __init local_init(void) ...@@ -256,7 +244,6 @@ static int __init local_init(void)
static void local_exit(void) static void local_exit(void)
{ {
kmem_cache_destroy(_rq_bio_info_cache);
kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache); kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name); unregister_blkdev(_major, _name);
...@@ -1986,7 +1973,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) ...@@ -1986,7 +1973,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{ {
struct dm_md_mempools *p = dm_table_get_md_mempools(t); struct dm_md_mempools *p = dm_table_get_md_mempools(t);
if (md->io_pool && md->bs) { if (md->bs) {
/* The md already has necessary mempools. */ /* The md already has necessary mempools. */
if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
/* /*
...@@ -2780,11 +2767,12 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u ...@@ -2780,11 +2767,12 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io)); per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
pools->io_pool = (type == DM_TYPE_BIO_BASED) ? pools->io_pool = NULL;
mempool_create_slab_pool(MIN_IOS, _io_cache) : if (type == DM_TYPE_BIO_BASED) {
mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache); pools->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!pools->io_pool) if (!pools->io_pool)
goto free_pools_and_out; goto free_pools_and_out;
}
pools->tio_pool = NULL; pools->tio_pool = NULL;
if (type == DM_TYPE_REQUEST_BASED) { if (type == DM_TYPE_REQUEST_BASED) {
...@@ -2814,6 +2802,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u ...@@ -2814,6 +2802,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
mempool_destroy(pools->tio_pool); mempool_destroy(pools->tio_pool);
free_io_pool_and_out: free_io_pool_and_out:
if (pools->io_pool)
mempool_destroy(pools->io_pool); mempool_destroy(pools->io_pool);
free_pools_and_out: free_pools_and_out:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment