Commit 4e6e36c3 authored by Mike Snitzer

Revert "dm: do not allocate any mempools for blk-mq request-based DM"

This reverts commit cbc4e3c1.
Reported-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent e262f347
@@ -964,8 +964,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 		return -EINVAL;
 	}
 
-	if (IS_ERR(t->mempools))
-		return PTR_ERR(t->mempools);
+	if (!t->mempools)
+		return -ENOMEM;
 
 	return 0;
 }
...
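The hunk above switches dm_table_alloc_md_mempools() from the ERR_PTR()/PTR_ERR() error convention back to a plain NULL check with an explicit -ENOMEM. A minimal sketch of the two idioms, assuming a made-up struct ctx and alloc_ctx_* helpers (generic kernel style, not code from this driver):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct ctx { int unused; };

	/* Sketch: encode the errno in the returned pointer. */
	static struct ctx *alloc_ctx_errptr(gfp_t gfp)
	{
		struct ctx *c = kzalloc(sizeof(*c), gfp);

		if (!c)
			return ERR_PTR(-ENOMEM);	/* caller tests with IS_ERR()/PTR_ERR() */
		return c;
	}

	/* Sketch: the convention restored here; NULL simply means allocation failure. */
	static struct ctx *alloc_ctx_null(gfp_t gfp)
	{
		return kzalloc(sizeof(struct ctx), gfp);
	}

With the first form the caller propagates PTR_ERR(); with the second, as in the restored check above, the caller tests for NULL and chooses the errno itself (-ENOMEM).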
@@ -2349,52 +2349,39 @@ static void free_dev(struct mapped_device *md)
 	kfree(md);
 }
 
-static unsigned filter_md_type(unsigned type, struct mapped_device *md)
-{
-	if (type == DM_TYPE_BIO_BASED)
-		return type;
-
-	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
-}
-
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-	switch (filter_md_type(dm_table_get_type(t), md)) {
-	case DM_TYPE_BIO_BASED:
-		if (md->bs && md->io_pool) {
+	if (md->bs) {
+		/* The md already has necessary mempools. */
+		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
 			/*
-			 * This bio-based md already has necessary mempools.
 			 * Reload bioset because front_pad may have changed
 			 * because a different table was loaded.
 			 */
 			bioset_free(md->bs);
 			md->bs = p->bs;
			p->bs = NULL;
-			goto out;
 		}
-		break;
-	case DM_TYPE_REQUEST_BASED:
-		if (md->rq_pool && md->io_pool)
-			/*
-			 * This request-based md already has necessary mempools.
-			 */
-			goto out;
-		break;
-	case DM_TYPE_MQ_REQUEST_BASED:
-		BUG_ON(p); /* No mempools needed */
-		return;
+		/*
+		 * There's no need to reload with request-based dm
+		 * because the size of front_pad doesn't change.
+		 * Note for future: If you are to reload bioset,
+		 * prep-ed requests in the queue may refer
+		 * to bio from the old bioset, so you must walk
+		 * through the queue to unprep.
+		 */
+		goto out;
 	}
 
 	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
 
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
 	md->rq_pool = p->rq_pool;
 	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
 
 out:
 	/* mempool bind completed, no longer need any mempools in the table */
 	dm_table_free_md_mempools(t);
@@ -2774,6 +2761,14 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
 	return err;
 }
 
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+	if (type == DM_TYPE_BIO_BASED)
+		return type;
+
+	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
+}
+
 /*
  * Setup the DM device's queue based on md's type
  */
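filter_md_type(), re-added above, leaves bio-based tables alone and maps request-based tables to either DM_TYPE_REQUEST_BASED (the legacy request_fn path) or DM_TYPE_MQ_REQUEST_BASED, depending on md->use_blk_mq. A hedged sketch of how a queue-setup path might dispatch on the filtered type; setup_queue_by_type() and the init_*_queue() callees are hypothetical names, and only dm_init_request_based_blk_mq_queue() actually appears in this diff:

	/* Sketch only: route queue initialisation by the filtered table type. */
	static int setup_queue_by_type(struct mapped_device *md, unsigned type)
	{
		switch (filter_md_type(type, md)) {
		case DM_TYPE_REQUEST_BASED:
			return init_legacy_rq_queue(md);	/* old request_fn path */
		case DM_TYPE_MQ_REQUEST_BASED:
			return dm_init_request_based_blk_mq_queue(md);
		case DM_TYPE_BIO_BASED:
			return init_bio_based_queue(md);	/* make_request path */
		}
		return -EINVAL;
	}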
@@ -3495,7 +3490,7 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
 		offsetof(struct dm_target_io, clone);
@@ -3514,26 +3509,24 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 	return pools;
 out:
 	dm_free_md_mempools(pools);
-	return ERR_PTR(-ENOMEM);
+	return NULL;
 }
 
 struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
 					    unsigned type)
 {
-	unsigned int pool_size;
+	unsigned int pool_size = dm_get_reserved_rq_based_ios();
 	struct dm_md_mempools *pools;
 
-	if (filter_md_type(type, md) == DM_TYPE_MQ_REQUEST_BASED)
-		return NULL; /* No mempools needed */
-
-	pool_size = dm_get_reserved_rq_based_ios();
 	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
-	pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-	if (!pools->rq_pool)
-		goto out;
+	if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
+	}
 
 	pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
 	if (!pools->io_pool)
@@ -3542,7 +3535,7 @@ struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
 	return pools;
 out:
 	dm_free_md_mempools(pools);
-	return ERR_PTR(-ENOMEM);
+	return NULL;
 }
 
 void dm_free_md_mempools(struct dm_md_mempools *pools)
...
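Both allocators unwind through dm_free_md_mempools() on partial failure (the goto out paths above) before returning NULL, so the teardown must tolerate a partially populated struct dm_md_mempools. The diff only shows the function's signature as context; a minimal sketch of such a teardown, assuming the usual mempool/bioset destructors, might look like:

	/* Sketch: free whichever pools were actually created; safe on partial init. */
	void dm_free_md_mempools(struct dm_md_mempools *pools)
	{
		if (!pools)
			return;

		if (pools->io_pool)
			mempool_destroy(pools->io_pool);
		if (pools->rq_pool)
			mempool_destroy(pools->rq_pool);
		if (pools->bs)
			bioset_free(pools->bs);

		kfree(pools);
	}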