Commit 6a6c5613 authored by Mikulas Patocka's avatar Mikulas Patocka

dm-crypt: support for per-sector NVMe metadata

Support per-sector NVMe metadata in dm-crypt.

This commit changes dm-crypt so that it can use NVMe metadata to store
authentication information. We can put dm-crypt directly on top of an
NVMe device, without using dm-integrity.

This commit roughly doubles write throughput, because there will be no
writes to the dm-integrity journal.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
parent a48f6b82
...@@ -214,7 +214,8 @@ struct crypt_config { ...@@ -214,7 +214,8 @@ struct crypt_config {
unsigned int integrity_tag_size; unsigned int integrity_tag_size;
unsigned int integrity_iv_size; unsigned int integrity_iv_size;
unsigned int on_disk_tag_size; unsigned int used_tag_size;
unsigned int tuple_size;
/* /*
* pool for per bio private data, crypto requests, * pool for per bio private data, crypto requests,
...@@ -256,7 +257,7 @@ static unsigned get_max_request_size(struct crypt_config *cc, bool wrt) ...@@ -256,7 +257,7 @@ static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size); val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
if (likely(!val)) if (likely(!val))
val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE; val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
if (wrt || cc->on_disk_tag_size) { if (wrt || cc->used_tag_size) {
if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT)) if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
val = BIO_MAX_VECS << PAGE_SHIFT; val = BIO_MAX_VECS << PAGE_SHIFT;
} }
...@@ -1176,14 +1177,14 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) ...@@ -1176,14 +1177,14 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
unsigned int tag_len; unsigned int tag_len;
int ret; int ret;
if (!bio_sectors(bio) || !io->cc->on_disk_tag_size) if (!bio_sectors(bio) || !io->cc->tuple_size)
return 0; return 0;
bip = bio_integrity_alloc(bio, GFP_NOIO, 1); bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
if (IS_ERR(bip)) if (IS_ERR(bip))
return PTR_ERR(bip); return PTR_ERR(bip);
tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
bip->bip_iter.bi_sector = io->cc->start + io->sector; bip->bip_iter.bi_sector = io->cc->start + io->sector;
...@@ -1207,18 +1208,18 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) ...@@ -1207,18 +1208,18 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL; return -EINVAL;
} }
if (bi->tag_size != cc->on_disk_tag_size || if (bi->tuple_size < cc->used_tag_size) {
bi->tuple_size != cc->on_disk_tag_size) {
ti->error = "Integrity profile tag size mismatch."; ti->error = "Integrity profile tag size mismatch.";
return -EINVAL; return -EINVAL;
} }
cc->tuple_size = bi->tuple_size;
if (1 << bi->interval_exp != cc->sector_size) { if (1 << bi->interval_exp != cc->sector_size) {
ti->error = "Integrity profile sector size mismatch."; ti->error = "Integrity profile sector size mismatch.";
return -EINVAL; return -EINVAL;
} }
if (crypt_integrity_aead(cc)) { if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; cc->integrity_tag_size = cc->used_tag_size - cc->integrity_iv_size;
DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md), DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
cc->integrity_tag_size, cc->integrity_iv_size); cc->integrity_tag_size, cc->integrity_iv_size);
...@@ -1230,7 +1231,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) ...@@ -1230,7 +1231,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md), DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
cc->integrity_iv_size); cc->integrity_iv_size);
if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) { if ((cc->integrity_tag_size + cc->integrity_iv_size) > cc->tuple_size) {
ti->error = "Not enough space for integrity tag in the profile."; ti->error = "Not enough space for integrity tag in the profile.";
return -EINVAL; return -EINVAL;
} }
...@@ -1309,7 +1310,7 @@ static void *tag_from_dmreq(struct crypt_config *cc, ...@@ -1309,7 +1310,7 @@ static void *tag_from_dmreq(struct crypt_config *cc,
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) * return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
cc->on_disk_tag_size]; cc->tuple_size];
} }
static void *iv_tag_from_dmreq(struct crypt_config *cc, static void *iv_tag_from_dmreq(struct crypt_config *cc,
...@@ -1390,9 +1391,9 @@ static int crypt_convert_block_aead(struct crypt_config *cc, ...@@ -1390,9 +1391,9 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
cc->sector_size, iv); cc->sector_size, iv);
r = crypto_aead_encrypt(req); r = crypto_aead_encrypt(req);
if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size) if (cc->integrity_tag_size + cc->integrity_iv_size != cc->tuple_size)
memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0, memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size)); cc->tuple_size - (cc->integrity_tag_size + cc->integrity_iv_size));
} else { } else {
aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
cc->sector_size + cc->integrity_tag_size, iv); cc->sector_size + cc->integrity_tag_size, iv);
...@@ -1822,7 +1823,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) ...@@ -1822,7 +1823,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
return; return;
if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) && if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) { cc->used_tag_size && bio_data_dir(base_bio) == READ) {
io->ctx.aead_recheck = true; io->ctx.aead_recheck = true;
io->ctx.aead_failed = false; io->ctx.aead_failed = false;
io->error = 0; io->error = 0;
...@@ -3206,7 +3207,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar ...@@ -3206,7 +3207,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
ti->error = "Invalid integrity arguments"; ti->error = "Invalid integrity arguments";
return -EINVAL; return -EINVAL;
} }
cc->on_disk_tag_size = val; cc->used_tag_size = val;
sval = strchr(opt_string + strlen("integrity:"), ':') + 1; sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) { if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
...@@ -3418,12 +3419,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -3418,12 +3419,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret) if (ret)
goto bad; goto bad;
cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size; cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->tuple_size;
if (!cc->tag_pool_max_sectors) if (!cc->tag_pool_max_sectors)
cc->tag_pool_max_sectors = 1; cc->tag_pool_max_sectors = 1;
ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS, ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
cc->tag_pool_max_sectors * cc->on_disk_tag_size); cc->tag_pool_max_sectors * cc->tuple_size);
if (ret) { if (ret) {
ti->error = "Cannot allocate integrity tags mempool"; ti->error = "Cannot allocate integrity tags mempool";
goto bad; goto bad;
...@@ -3535,8 +3536,8 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) ...@@ -3535,8 +3536,8 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
io = dm_per_bio_data(bio, cc->per_bio_data_size); io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (cc->on_disk_tag_size) { if (cc->tuple_size) {
unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);
if (unlikely(tag_len > KMALLOC_MAX_SIZE)) if (unlikely(tag_len > KMALLOC_MAX_SIZE))
io->integrity_metadata = NULL; io->integrity_metadata = NULL;
...@@ -3608,7 +3609,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type, ...@@ -3608,7 +3609,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
if (cc->on_disk_tag_size) if (cc->used_tag_size)
num_feature_args++; num_feature_args++;
if (num_feature_args) { if (num_feature_args) {
DMEMIT(" %d", num_feature_args); DMEMIT(" %d", num_feature_args);
...@@ -3624,8 +3625,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, ...@@ -3624,8 +3625,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" no_read_workqueue"); DMEMIT(" no_read_workqueue");
if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
DMEMIT(" no_write_workqueue"); DMEMIT(" no_write_workqueue");
if (cc->on_disk_tag_size) if (cc->used_tag_size)
DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); DMEMIT(" integrity:%u:%s", cc->used_tag_size, cc->cipher_auth);
if (cc->sector_size != (1 << SECTOR_SHIFT)) if (cc->sector_size != (1 << SECTOR_SHIFT))
DMEMIT(" sector_size:%d", cc->sector_size); DMEMIT(" sector_size:%d", cc->sector_size);
if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
...@@ -3647,9 +3648,9 @@ static void crypt_status(struct dm_target *ti, status_type_t type, ...@@ -3647,9 +3648,9 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ? DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
'y' : 'n'); 'y' : 'n');
if (cc->on_disk_tag_size) if (cc->used_tag_size)
DMEMIT(",integrity_tag_size=%u,cipher_auth=%s", DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
cc->on_disk_tag_size, cc->cipher_auth); cc->used_tag_size, cc->cipher_auth);
if (cc->sector_size != (1 << SECTOR_SHIFT)) if (cc->sector_size != (1 << SECTOR_SHIFT))
DMEMIT(",sector_size=%d", cc->sector_size); DMEMIT(",sector_size=%d", cc->sector_size);
if (cc->cipher_string) if (cc->cipher_string)
...@@ -3757,7 +3758,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -3757,7 +3758,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = { static struct target_type crypt_target = {
.name = "crypt", .name = "crypt",
.version = {1, 26, 0}, .version = {1, 27, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = crypt_ctr, .ctr = crypt_ctr,
.dtr = crypt_dtr, .dtr = crypt_dtr,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment