Commit 583fe747 authored by Mikulas Patocka, committed by Mike Snitzer

dm crypt: fix large block integrity support

Previously, dm-crypt could use blocks composed of multiple 512b sectors,
but it created an integrity profile for each 512b sector (it padded it with
zeroes).  Fix dm-crypt so that the integrity profile is sent for each
block, not each sector.

The user must use the same block size in the DM crypt and integrity
targets.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 9d609f85
...@@ -938,10 +938,15 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) ...@@ -938,10 +938,15 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL; return -EINVAL;
} }
if (bi->tag_size != cc->on_disk_tag_size) { if (bi->tag_size != cc->on_disk_tag_size ||
bi->tuple_size != cc->on_disk_tag_size) {
ti->error = "Integrity profile tag size mismatch."; ti->error = "Integrity profile tag size mismatch.";
return -EINVAL; return -EINVAL;
} }
if (1 << bi->interval_exp != cc->sector_size) {
ti->error = "Integrity profile sector size mismatch.";
return -EINVAL;
}
if (crypt_integrity_aead(cc)) { if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
...@@ -1322,7 +1327,7 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -1322,7 +1327,7 @@ static int crypt_convert(struct crypt_config *cc,
case -EINPROGRESS: case -EINPROGRESS:
ctx->r.req = NULL; ctx->r.req = NULL;
ctx->cc_sector += sector_step; ctx->cc_sector += sector_step;
tag_offset += sector_step; tag_offset++;
continue; continue;
/* /*
* The request was already processed (synchronously). * The request was already processed (synchronously).
...@@ -1330,7 +1335,7 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -1330,7 +1335,7 @@ static int crypt_convert(struct crypt_config *cc,
case 0: case 0:
atomic_dec(&ctx->cc_pending); atomic_dec(&ctx->cc_pending);
ctx->cc_sector += sector_step; ctx->cc_sector += sector_step;
tag_offset += sector_step; tag_offset++;
cond_resched(); cond_resched();
continue; continue;
/* /*
...@@ -2735,6 +2740,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -2735,6 +2740,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot allocate integrity tags mempool"; ti->error = "Cannot allocate integrity tags mempool";
goto bad; goto bad;
} }
cc->tag_pool_max_sectors <<= cc->sector_shift;
} }
ret = -ENOMEM; ret = -ENOMEM;
...@@ -2816,16 +2823,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) ...@@ -2816,16 +2823,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (cc->on_disk_tag_size) { if (cc->on_disk_tag_size) {
unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio); unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
if (unlikely(tag_len > KMALLOC_MAX_SIZE) || if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
unlikely(!(io->integrity_metadata = kzalloc(tag_len, unlikely(!(io->integrity_metadata = kmalloc(tag_len,
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors) if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors); dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO); io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
io->integrity_metadata_from_pool = true; io->integrity_metadata_from_pool = true;
memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment