Commit cffd425b authored by Linus Torvalds

Merge tag 'for-5.0/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM crypt's parsing of extended IV arguments.

 - Fix DM thinp's discard passdown to properly account for the extra
   reference taken to guard against a block being reallocated before
   its discard has been issued.

 - Fix bio-based DM's redundant IO accounting for bios that must be
   split due to the nature of the DM target (e.g. dm-stripe, dm-thinp,
   etc.).

* tag 'for-5.0/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: add missing trace_block_split() to __split_and_process_bio()
  dm: fix dm_wq_work() to only use __split_and_process_bio() if appropriate
  dm: fix redundant IO accounting for bios that need splitting
  dm: fix clone_bio() to trigger blk_recount_segments()
  dm thin: fix passdown_double_checking_shared_status()
  dm crypt: fix parsing of extended IV arguments
parents c04e2a78 075c18c3
drivers/md/dm-crypt.c

@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key)
 	 * capi:cipher_api_spec-iv:ivopts
 	 */
 	tmp = &cipher_in[strlen("capi:")];
-	cipher_api = strsep(&tmp, "-");
-	*ivmode = strsep(&tmp, ":");
-	*ivopts = tmp;
+
+	/* Separate IV options if present, it can contain another '-' in hash name */
+	*ivopts = strrchr(tmp, ':');
+	if (*ivopts) {
+		**ivopts = '\0';
+		(*ivopts)++;
+	}
+	/* Parse IV mode */
+	*ivmode = strrchr(tmp, '-');
+	if (*ivmode) {
+		**ivmode = '\0';
+		(*ivmode)++;
+	}
+	/* The rest is crypto API spec */
+	cipher_api = tmp;
 
 	if (*ivmode && !strcmp(*ivmode, "lmk"))
 		cc->tfms_count = 64;
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key)
 		goto bad_mem;
 
 	chainmode = strsep(&tmp, "-");
-	*ivopts = strsep(&tmp, "-");
-	*ivmode = strsep(&*ivopts, ":");
-
-	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+	*ivmode = strsep(&tmp, ":");
+	*ivopts = tmp;
 
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
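The old parsing consumed the cipher specification left to right with strsep(), cutting at the first '-' or ':'. That breaks IV options whose hash name contains a dash (sha3-256) and capi cipher specs that contain one themselves (Adiantum's xchacha12,aes-adiantum). The new code walks from the right instead. Below is a minimal user-space sketch of that strategy; the helper name and test strings are illustrative only, not kernel code.

#include <stdio.h>
#include <string.h>

/* Hypothetical demo: 'spec' is the table argument after the "capi:"
 * prefix has already been skipped, as in the kernel code above. */
static void parse_capi(char *spec)
{
	char *cipher_api, *ivmode, *ivopts;

	/* Split IV options off first: the hash name may contain '-' */
	ivopts = strrchr(spec, ':');
	if (ivopts)
		*ivopts++ = '\0';

	/* The last remaining '-' separates the IV mode from the API spec */
	ivmode = strrchr(spec, '-');
	if (ivmode)
		*ivmode++ = '\0';

	cipher_api = spec; /* whatever is left, internal dashes intact */
	printf("api=%s iv=%s opts=%s\n", cipher_api,
	       ivmode ? ivmode : "(none)", ivopts ? ivopts : "(none)");
}

int main(void)
{
	char adiantum[] = "xchacha12,aes-adiantum-plain64";
	char essiv[]    = "cbc(aes)-essiv:sha3-256";

	parse_capi(adiantum); /* api=xchacha12,aes-adiantum iv=plain64 opts=(none) */
	parse_capi(essiv);    /* api=cbc(aes) iv=essiv opts=sha3-256 */
	return 0;
}

Splitting the ':'-suffixed IV options off first is what makes the later strrchr() for '-' safe: any dash inside the options string has already been removed from the search range.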
drivers/md/dm-thin-metadata.c

@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
 	return r;
 }
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
 	int r;
 	uint32_t ref_count;
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 	down_read(&pmd->root_lock);
 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
 	if (!r)
-		*result = (ref_count != 0);
+		*result = (ref_count > 1);
 	up_read(&pmd->root_lock);
 
 	return r;
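The rename tracks a real semantic change. The passdown path takes its own reference on each data block before issuing the discard, to stop the block being reallocated in the meantime, so a block's reference count here is always at least 1 and the old test (ref_count != 0) could never report the block as free, quietly disabling discard passdown. A sketch of the new predicate's meaning, with illustrative names rather than kernel code:

#include <stdbool.h>
#include <stdint.h>

/* Assumes, as above, that the caller holds one guard reference on every
 * block it is about to pass a discard down for. */
static bool block_is_shared(uint32_t ref_count)
{
	/*
	 * > 1: some other device or snapshot still maps the block.
	 * == 1: only our guard reference remains; safe to discard.
	 * The old test (ref_count != 0) was always true under the guard
	 * reference, so passdown never saw a reclaimable block.
	 */
	return ref_count > 1;
}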
drivers/md/dm-thin-metadata.h

@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
drivers/md/dm-thin.c

@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;
 
-			if (!used)
+			if (!shared)
 				break;
 		}
@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;
 
-			if (used)
+			if (shared)
				break;
 		}
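With the corrected predicate, the two loops above are a standard run scanner: skip shared blocks, then extend over consecutive unshared ones, and pass the discard down for each maximal run. A standalone sketch of the same pattern (names hypothetical, error handling elided):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t block_t;

/* Report each maximal run [b, e) of blocks for which is_shared() is false. */
static void for_each_unshared_run(block_t b, block_t end,
				  bool (*is_shared)(block_t))
{
	block_t e;

	while (b != end) {
		/* find start of an unshared run */
		for (; b < end; b++)
			if (!is_shared(b))
				break;
		if (b == end)
			break;

		/* find end of the run */
		for (e = b + 1; e != end; e++)
			if (is_shared(e))
				break;

		printf("discard blocks [%llu, %llu)\n",
		       (unsigned long long)b, (unsigned long long)e);
		b = e;
	}
}

static bool demo_shared(block_t b)
{
	return b % 4 == 0;	/* pretend every 4th block is still shared */
}

int main(void)
{
	for_each_unshared_run(0, 12, demo_shared);
	/* prints runs [1,4), [5,8), [9,12) */
	return 0;
}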
drivers/md/dm.c

@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 
 	__bio_clone_fast(clone, bio);
 
-	if (unlikely(bio_integrity(bio) != NULL)) {
+	if (bio_integrity(bio)) {
 		int r;
 
 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1336,11 +1336,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 			return r;
 	}
 
-	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
-	clone->bi_iter.bi_size = to_bytes(len);
-
-	if (unlikely(bio_integrity(bio) != NULL))
-		bio_integrity_trim(clone);
+	bio_trim(clone, sector - clone->bi_iter.bi_sector, len);
 
 	return 0;
 }
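bio_trim() performs the same advance/resize (and integrity trim) as the open-coded sequence it replaces, but it additionally clears BIO_SEG_VALID, which is what forces blk_recount_segments() to run on the trimmed clone; that is the missing step this fix restores. Note the units change as well: the old code converted with to_bytes(), while bio_trim() takes sectors and shifts internally. Paraphrasing block/bio.c as of v5.0 (a sketch from memory, not the verbatim source):

void bio_trim(struct bio *bio, int offset, int size)
{
	size <<= 9;			/* sectors -> bytes */
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	/* invalidate the cached segment count so it gets recomputed */
	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}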
@@ -1588,6 +1584,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 	ci->sector = bio->bi_iter.bi_sector;
 }
 
+#define __dm_part_stat_sub(part, field, subnd) \
+	(part_stat_get(part, field) -= (subnd))
+
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
@@ -1642,7 +1641,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
 						  GFP_NOIO, &md->queue->bio_split);
 			ci.io->orig_bio = b;
+
+			/*
+			 * Adjust IO stats for each split, otherwise upon queue
+			 * reentry there will be redundant IO accounting.
+			 * NOTE: this is a stop-gap fix, a proper fix involves
+			 * significant refactoring of DM core's bio splitting
+			 * (by eliminating DM's splitting and just using bio_split)
+			 */
+			part_stat_lock();
+			__dm_part_stat_sub(&dm_disk(md)->part0,
+					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+			part_stat_unlock();
+
 			bio_chain(b, bio);
+			trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
 			ret = generic_make_request(bio);
 			break;
 		}
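The subtraction compensates for reentry: generic_make_request() on the remainder passes through DM's start-of-IO accounting again, so without it every split run would be counted twice. ci.sector_count at this point is exactly the unprocessed remainder being resubmitted, which is why it is the amount subtracted. A toy model in plain user-space C (illustrative names only):

#include <stdio.h>

static unsigned long in_flight_sectors;	/* stand-in for part_stat sectors[] */

static void start_io_accounting(unsigned long sectors)
{
	in_flight_sectors += sectors;	/* done once per queue entry */
}

int main(void)
{
	unsigned long bio = 1024, front = 512, remainder = bio - front;

	start_io_accounting(bio);	/* original submission: 1024 */

	/* DM splits off 'front' and resubmits the remainder, which will be
	 * accounted again on reentry; subtract it first, as the
	 * __dm_part_stat_sub() call above does with ci.sector_count. */
	in_flight_sectors -= remainder;
	start_io_accounting(remainder);	/* reentry: 512 */

	printf("%lu\n", in_flight_sectors);	/* 1024, not 1536 */
	return 0;
}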
@@ -1713,6 +1726,15 @@ static blk_qc_t __process_bio(struct mapped_device *md,
 	return ret;
 }
 
+static blk_qc_t dm_process_bio(struct mapped_device *md,
+			       struct dm_table *map, struct bio *bio)
+{
+	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+		return __process_bio(md, map, bio);
+	else
+		return __split_and_process_bio(md, map, bio);
+}
+
 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
@@ -1733,10 +1755,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 		return ret;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		ret = __process_bio(md, map, bio);
-	else
-		ret = __split_and_process_bio(md, map, bio);
+	ret = dm_process_bio(md, map, bio);
 
 	dm_put_live_table(md, srcu_idx);
 	return ret;
@@ -2415,9 +2434,9 @@ static void dm_wq_work(struct work_struct *work)
 			break;
 
 		if (dm_request_based(md))
-			generic_make_request(c);
+			(void) generic_make_request(c);
 		else
-			__split_and_process_bio(md, map, c);
+			(void) dm_process_bio(md, map, c);
 	}
 
 	dm_put_live_table(md, srcu_idx);
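Routing the deferred-bio worker through dm_process_bio() is the substance of the dm_wq_work() fix in the shortlog: previously every deferred bio went through __split_and_process_bio(), even on DM_TYPE_NVME_BIO_BASED devices whose bios must take __process_bio() instead. The shared helper keeps the requeue path and the normal dm_make_request() path in agreement.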