Commit ef43aa38 authored by Milan Broz, committed by Mike Snitzer

dm crypt: add cryptographic data integrity protection (authenticated encryption)

Allow the use of per-sector metadata, provided by the dm-integrity
module, for integrity protection and for a persistently stored per-sector
Initialization Vector (IV).  The underlying device must support the
"DM-DIF-EXT-TAG" dm-integrity profile.

The per-bio integrity metadata is allocated by dm-crypt for every bio.

Examples of low-level mapping tables for various types of use:
 DEV=/dev/sdb
 SIZE=417792

 # Additional HMAC with CBC-ESSIV, key is concatenated encryption key + HMAC key
 SIZE_INT=389952
 dmsetup create x --table "0 $SIZE_INT integrity $DEV 0 32 J 0"
 dmsetup create y --table "0 $SIZE_INT crypt aes-cbc-essiv:sha256 \
 11ff33c6fb942655efb3e30cf4c0fd95f5ef483afca72166c530ae26151dd83b \
 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff \
 0 /dev/mapper/x 0 1 integrity:32:hmac(sha256)"

 # AEAD (Authenticated Encryption with Additional Data) - GCM with random IVs
 # GCM in the kernel uses a 96-bit IV and we store a 128-bit auth tag (so 28 bytes of metadata space)
 SIZE_INT=393024
 dmsetup create x --table "0 $SIZE_INT integrity $DEV 0 28 J 0"
 dmsetup create y --table "0 $SIZE_INT crypt aes-gcm-random \
 11ff33c6fb942655efb3e30cf4c0fd95f5ef483afca72166c530ae26151dd83b \
 0 /dev/mapper/x 0 1 integrity:28:aead"

 # Random IV only for XTS mode (no integrity protection but provides atomic random sector change)
 SIZE_INT=401272
 dmsetup create x --table "0 $SIZE_INT integrity $DEV 0 16 J 0"
 dmsetup create y --table "0 $SIZE_INT crypt aes-xts-random \
 11ff33c6fb942655efb3e30cf4c0fd95f5ef483afca72166c530ae26151dd83b \
 0 /dev/mapper/x 0 1 integrity:16:none"

 # Random IV with XTS + HMAC integrity protection
 SIZE_INT=377656
 dmsetup create x --table "0 $SIZE_INT integrity $DEV 0 48 J 0"
 dmsetup create y --table "0 $SIZE_INT crypt aes-xts-random \
 11ff33c6fb942655efb3e30cf4c0fd95f5ef483afca72166c530ae26151dd83b \
 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff \
 0 /dev/mapper/x 0 1 integrity:48:hmac(sha256)"

Both AEAD and HMAC protection authenticate not only the data but also the
sector metadata.

HMAC protection is implemented through the authenc() wrapper (so it is
processed the same way as an authenticated mode).

In HMAC mode there are two keys (concatenated in the dm-crypt mapping
table).  The first is the encryption key and the second is the key for
authentication (HMAC).  (It is a userspace decision whether these keys are
independent or somehow derived from each other.)
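
For illustration only (a sketch reusing the keys from the CBC-ESSIV + HMAC
example above; the variable names are hypothetical), the key field in the
mapping table is simply the two hex strings joined into one:

 ENC_KEY=11ff33c6fb942655efb3e30cf4c0fd95f5ef483afca72166c530ae26151dd83b
 MAC_KEY=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff
 KEY="$ENC_KEY$MAC_KEY"   # single key field passed in the dm-crypt table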

The sector request for AEAD/HMAC authenticated encryption looks like this:
 |----- AAD -------|------ DATA -------|-- AUTH TAG --|
 | (authenticated) | (auth+encryption) |              |
 | sector_LE |  IV |  sector in/out    |  tag in/out  |

For writes, the integrity fields are calculated during AEAD encryption
of every sector, stored in the bio integrity fields, and sent to the
underlying dm-integrity target for storage.

For reads, the integrity metadata is verified during AEAD decryption of
every sector (the fields are filled in by dm-integrity, but they are
pre-allocated in dm-crypt).

There is also experimental support in the cryptsetup utility for more
user-friendly configuration (as part of the LUKS2 format).
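
A hypothetical invocation, based on the LUKS2 --integrity support that
later shipped in cryptsetup 2.x (exact option names in the experimental
branch may differ; $DEV is reused from the examples above):

 cryptsetup luksFormat --type luks2 --cipher aes-xts-plain64 \
 --integrity hmac-sha256 $DEV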

Because the integrity fields are not valid on initial creation, the
device must be "formatted".  This can be done by direct-io writes to the
device (e.g. dd in direct-io mode).  For now, a trivial tool to do this
is available, see: https://github.com/mbroz/dm_int_tools
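
A minimal sketch of such a formatting pass, assuming the activated
dm-crypt device is /dev/mapper/y as in the examples above (overwriting it
once recomputes and stores valid tags for every sector):

 dd if=/dev/zero of=/dev/mapper/y bs=1M oflag=direct
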
Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Ondrej Mosnacek <omosnacek@gmail.com>
Signed-off-by: Vashek Matyas <matyas@fi.muni.cz>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 7eada909
@@ -93,6 +93,22 @@ submit_from_crypt_cpus
thread because it benefits CFQ to have writes submitted using the
same context.

integrity:<bytes>:<type>
Calculates and verifies integrity for the encrypted device (uses
authenticated encryption).  This mode requires metadata stored in a
per-bio integrity structure of <bytes> in size.
This option requires that the underlying device is created by the
dm-integrity target and provides exactly <bytes> of per-sector metadata.
There can be two options for <type>.  The first one is used when the
encryption mode is an authenticated (AEAD) mode; then <type> must be
just "aead".  The second option is integrity calculated by a keyed
hash (HMAC); then <type> is for example "hmac(sha256)".
If a random IV is used (a persistently stored per-sector IV in the
metadata), then <bytes> includes space for both the random IV and the
authentication tag.

Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
......
/* /*
* Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
* Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved. * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com>
* *
* This file is released under the GPL. * This file is released under the GPL.
*/ */
...@@ -31,6 +31,9 @@ ...@@ -31,6 +31,9 @@
#include <crypto/md5.h> #include <crypto/md5.h>
#include <crypto/algapi.h> #include <crypto/algapi.h>
#include <crypto/skcipher.h> #include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <keys/user-type.h> #include <keys/user-type.h>
#include <linux/device-mapper.h> #include <linux/device-mapper.h>
...@@ -48,7 +51,11 @@ struct convert_context { ...@@ -48,7 +51,11 @@ struct convert_context {
struct bvec_iter iter_out; struct bvec_iter iter_out;
sector_t cc_sector; sector_t cc_sector;
atomic_t cc_pending; atomic_t cc_pending;
struct skcipher_request *req; union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
}; };
/* /*
...@@ -57,6 +64,8 @@ struct convert_context { ...@@ -57,6 +64,8 @@ struct convert_context {
struct dm_crypt_io { struct dm_crypt_io {
struct crypt_config *cc; struct crypt_config *cc;
struct bio *base_bio; struct bio *base_bio;
u8 *integrity_metadata;
bool integrity_metadata_from_pool;
struct work_struct work; struct work_struct work;
struct convert_context ctx; struct convert_context ctx;
...@@ -70,8 +79,8 @@ struct dm_crypt_io { ...@@ -70,8 +79,8 @@ struct dm_crypt_io {
struct dm_crypt_request { struct dm_crypt_request {
struct convert_context *ctx; struct convert_context *ctx;
struct scatterlist sg_in; struct scatterlist sg_in[4];
struct scatterlist sg_out; struct scatterlist sg_out[4];
sector_t iv_sector; sector_t iv_sector;
}; };
...@@ -118,6 +127,11 @@ struct iv_tcw_private { ...@@ -118,6 +127,11 @@ struct iv_tcw_private {
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */
CRYPT_MODE_INTEGRITY_HMAC, /* Compose authenticated mode from normal mode and HMAC */
};
/* /*
* The fields in here must be read only after initialization. * The fields in here must be read only after initialization.
*/ */
...@@ -126,11 +140,14 @@ struct crypt_config { ...@@ -126,11 +140,14 @@ struct crypt_config {
sector_t start; sector_t start;
/* /*
* pool for per bio private data, crypto requests and * pool for per bio private data, crypto requests,
* encryption requeusts/buffer pages * encryption requeusts/buffer pages and integrity tags
*/ */
mempool_t *req_pool; mempool_t *req_pool;
mempool_t *page_pool; mempool_t *page_pool;
mempool_t *tag_pool;
unsigned tag_pool_max_sectors;
struct bio_set *bs; struct bio_set *bs;
struct mutex bio_alloc_lock; struct mutex bio_alloc_lock;
...@@ -143,6 +160,7 @@ struct crypt_config { ...@@ -143,6 +160,7 @@ struct crypt_config {
char *cipher; char *cipher;
char *cipher_string; char *cipher_string;
char *cipher_auth;
char *key_string; char *key_string;
const struct crypt_iv_operations *iv_gen_ops; const struct crypt_iv_operations *iv_gen_ops;
...@@ -157,8 +175,12 @@ struct crypt_config { ...@@ -157,8 +175,12 @@ struct crypt_config {
/* ESSIV: struct crypto_cipher *essiv_tfm */ /* ESSIV: struct crypto_cipher *essiv_tfm */
void *iv_private; void *iv_private;
struct crypto_skcipher **tfms; union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
} cipher_tfm;
unsigned tfms_count; unsigned tfms_count;
unsigned long cipher_flags;
/* /*
* Layout of each crypto request: * Layout of each crypto request:
...@@ -181,21 +203,36 @@ struct crypt_config { ...@@ -181,21 +203,36 @@ struct crypt_config {
unsigned int key_size; unsigned int key_size;
unsigned int key_parts; /* independent parts in key buffer */ unsigned int key_parts; /* independent parts in key buffer */
unsigned int key_extra_size; /* additional keys length */ unsigned int key_extra_size; /* additional keys length */
unsigned int key_mac_size; /* MAC key size for authenc(...) */
unsigned int integrity_tag_size;
unsigned int integrity_iv_size;
unsigned int on_disk_tag_size;
u8 *authenc_key; /* space for keys in authenc() format (if used) */
u8 key[0]; u8 key[0];
}; };
#define MIN_IOS 64 #define MIN_IOS 64
#define MAX_TAG_SIZE 480
#define POOL_ENTRY_SIZE 512
static void clone_init(struct dm_crypt_io *, struct bio *); static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io); static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
/* /*
* Use this to access cipher attributes that are the same for each CPU. * Use this to access cipher attributes that are the same for each CPU.
*/ */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc) static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{ {
return cc->tfms[0]; return cc->cipher_tfm.tfms[0];
}
static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
return cc->cipher_tfm.tfms_aead[0];
} }
/* /*
...@@ -325,8 +362,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, ...@@ -325,8 +362,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
return essiv_tfm; return essiv_tfm;
} }
if (crypto_cipher_blocksize(essiv_tfm) != if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
crypto_skcipher_ivsize(any_tfm(cc))) {
ti->error = "Block size of ESSIV cipher does " ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher"; "not match IV size of block cipher";
crypto_free_cipher(essiv_tfm); crypto_free_cipher(essiv_tfm);
...@@ -395,6 +431,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, ...@@ -395,6 +431,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
essiv_tfm = setup_essiv_cpu(cc, ti, salt, essiv_tfm = setup_essiv_cpu(cc, ti, salt,
crypto_ahash_digestsize(hash_tfm)); crypto_ahash_digestsize(hash_tfm));
if (IS_ERR(essiv_tfm)) { if (IS_ERR(essiv_tfm)) {
crypt_iv_essiv_dtr(cc); crypt_iv_essiv_dtr(cc);
return PTR_ERR(essiv_tfm); return PTR_ERR(essiv_tfm);
...@@ -585,12 +622,14 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, ...@@ -585,12 +622,14 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq) struct dm_crypt_request *dmreq)
{ {
struct scatterlist *sg;
u8 *src; u8 *src;
int r = 0; int r = 0;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
src = kmap_atomic(sg_page(&dmreq->sg_in)); sg = crypt_get_sg_data(cc, dmreq->sg_in);
r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); src = kmap_atomic(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
kunmap_atomic(src); kunmap_atomic(src);
} else } else
memset(iv, 0, cc->iv_size); memset(iv, 0, cc->iv_size);
...@@ -601,18 +640,20 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, ...@@ -601,18 +640,20 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq) struct dm_crypt_request *dmreq)
{ {
struct scatterlist *sg;
u8 *dst; u8 *dst;
int r; int r;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
return 0; return 0;
dst = kmap_atomic(sg_page(&dmreq->sg_out)); sg = crypt_get_sg_data(cc, dmreq->sg_out);
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); dst = kmap_atomic(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */ /* Tweak the first block of plaintext sector */
if (!r) if (!r)
crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); crypto_xor(dst + sg->offset, iv, cc->iv_size);
kunmap_atomic(dst); kunmap_atomic(dst);
return r; return r;
...@@ -724,6 +765,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc, ...@@ -724,6 +765,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq) struct dm_crypt_request *dmreq)
{ {
struct scatterlist *sg;
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector); __le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src; u8 *src;
...@@ -731,8 +773,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, ...@@ -731,8 +773,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
/* Remove whitening from ciphertext */ /* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
src = kmap_atomic(sg_page(&dmreq->sg_in)); sg = crypt_get_sg_data(cc, dmreq->sg_in);
r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset); src = kmap_atomic(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
kunmap_atomic(src); kunmap_atomic(src);
} }
...@@ -748,6 +791,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, ...@@ -748,6 +791,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq) struct dm_crypt_request *dmreq)
{ {
struct scatterlist *sg;
u8 *dst; u8 *dst;
int r; int r;
...@@ -755,13 +799,22 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, ...@@ -755,13 +799,22 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
return 0; return 0;
/* Apply whitening on ciphertext */ /* Apply whitening on ciphertext */
dst = kmap_atomic(sg_page(&dmreq->sg_out)); sg = crypt_get_sg_data(cc, dmreq->sg_out);
r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset); dst = kmap_atomic(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
kunmap_atomic(dst); kunmap_atomic(dst);
return r; return r;
} }
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
/* Used only for writes, there must be an additional space to store IV */
get_random_bytes(iv, cc->iv_size);
return 0;
}
static const struct crypt_iv_operations crypt_iv_plain_ops = { static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen .generator = crypt_iv_plain_gen
}; };
...@@ -806,6 +859,108 @@ static const struct crypt_iv_operations crypt_iv_tcw_ops = { ...@@ -806,6 +859,108 @@ static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.post = crypt_iv_tcw_post .post = crypt_iv_tcw_post
}; };
static struct crypt_iv_operations crypt_iv_random_ops = {
.generator = crypt_iv_random_gen
};
/*
* Integrity extensions
*/
static bool crypt_integrity_aead(struct crypt_config *cc)
{
return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}
static bool crypt_integrity_hmac(struct crypt_config *cc)
{
return test_bit(CRYPT_MODE_INTEGRITY_HMAC, &cc->cipher_flags);
}
static bool crypt_integrity_mode(struct crypt_config *cc)
{
return crypt_integrity_aead(cc) || crypt_integrity_hmac(cc);
}
/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg)
{
if (unlikely(crypt_integrity_mode(cc)))
return &sg[2];
return sg;
}
static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
struct bio_integrity_payload *bip;
unsigned int tag_len;
int ret;
if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
return 0;
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
if (IS_ERR(bip))
return PTR_ERR(bip);
tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
bip->bip_iter.bi_size = tag_len;
bip->bip_iter.bi_sector = io->cc->start + io->sector;
/* We own the metadata, do not let bio_free to release it */
bip->bip_flags &= ~BIP_BLOCK_INTEGRITY;
ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
tag_len, offset_in_page(io->integrity_metadata));
if (unlikely(ret != tag_len))
return -ENOMEM;
return 0;
}
static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
/* From now we require underlying device with our integrity profile */
if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
ti->error = "Integrity profile not supported.";
return -EINVAL;
}
if (bi->tag_size != cc->on_disk_tag_size) {
ti->error = "Integrity profile tag size mismatch.";
return -EINVAL;
}
if (crypt_integrity_mode(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
DMINFO("Integrity AEAD, tag size %u, IV size %u.",
cc->integrity_tag_size, cc->integrity_iv_size);
if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
ti->error = "Integrity AEAD auth tag size is not supported.";
return -EINVAL;
}
} else if (cc->integrity_iv_size)
DMINFO("Additional per-sector space %u bytes for IV.",
cc->integrity_iv_size);
if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
ti->error = "Not enough space for integrity tag in the profile.";
return -EINVAL;
}
return 0;
#else
ti->error = "Integrity profile not supported.";
return -EINVAL;
#endif
}
static void crypt_convert_init(struct crypt_config *cc, static void crypt_convert_init(struct crypt_config *cc,
struct convert_context *ctx, struct convert_context *ctx,
struct bio *bio_out, struct bio *bio_in, struct bio *bio_out, struct bio *bio_in,
...@@ -822,58 +977,207 @@ static void crypt_convert_init(struct crypt_config *cc, ...@@ -822,58 +977,207 @@ static void crypt_convert_init(struct crypt_config *cc,
} }
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
struct skcipher_request *req) void *req)
{ {
return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
} }
static struct skcipher_request *req_of_dmreq(struct crypt_config *cc, static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
struct dm_crypt_request *dmreq)
{ {
return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start); return (void *)((char *)dmreq - cc->dmreq_start);
} }
static u8 *iv_of_dmreq(struct crypt_config *cc, static u8 *iv_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq) struct dm_crypt_request *dmreq)
{ {
return (u8 *)ALIGN((unsigned long)(dmreq + 1), if (crypt_integrity_mode(cc))
crypto_skcipher_alignmask(any_tfm(cc)) + 1); return (u8 *)ALIGN((unsigned long)(dmreq + 1),
crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
else
return (u8 *)ALIGN((unsigned long)(dmreq + 1),
crypto_skcipher_alignmask(any_tfm(cc)) + 1);
} }
static int crypt_convert_block(struct crypt_config *cc, static u8 *org_iv_of_dmreq(struct crypt_config *cc,
struct convert_context *ctx, struct dm_crypt_request *dmreq)
struct skcipher_request *req) {
return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}
static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
return (uint64_t*) ptr;
}
static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
cc->iv_size + sizeof(uint64_t);
return (unsigned int*)ptr;
}
static void *tag_from_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
struct convert_context *ctx = dmreq->ctx;
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
cc->on_disk_tag_size];
}
static void *iv_tag_from_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}
static int crypt_convert_block_aead(struct crypt_config *cc,
struct convert_context *ctx,
struct aead_request *req,
unsigned int tag_offset)
{ {
struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq; struct dm_crypt_request *dmreq;
u8 *iv; unsigned int data_len = 1 << SECTOR_SHIFT;
int r; u8 *iv, *org_iv, *tag_iv, *tag;
uint64_t *sector;
int r = 0;
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
dmreq = dmreq_of_req(cc, req); dmreq = dmreq_of_req(cc, req);
dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
*org_tag_of_dmreq(cc, dmreq) = tag_offset;
sector = org_sector_of_dmreq(cc, dmreq);
*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
iv = iv_of_dmreq(cc, dmreq); iv = iv_of_dmreq(cc, dmreq);
org_iv = org_iv_of_dmreq(cc, dmreq);
tag = tag_from_dmreq(cc, dmreq);
tag_iv = iv_tag_from_dmreq(cc, dmreq);
/* AEAD request:
* |----- AAD -------|------ DATA -------|-- AUTH TAG --|
* | (authenticated) | (auth+encryption) | |
* | sector_LE | IV | sector in/out | tag in/out |
*/
sg_init_table(dmreq->sg_in, 4);
sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, data_len, bv_in.bv_offset);
sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
sg_init_table(dmreq->sg_out, 4);
sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, data_len, bv_out.bv_offset);
sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
if (cc->iv_gen_ops) {
/* For READs use IV stored in integrity metadata */
if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
memcpy(org_iv, tag_iv, cc->iv_size);
} else {
r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
if (r < 0)
return r;
/* Store generated IV in integrity metadata */
if (cc->integrity_iv_size)
memcpy(tag_iv, org_iv, cc->iv_size);
}
/* Working copy of IV, to be modified in crypto API */
memcpy(iv, org_iv, cc->iv_size);
}
aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
if (bio_data_dir(ctx->bio_in) == WRITE) {
aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
data_len, iv);
r = crypto_aead_encrypt(req);
if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
} else {
aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
data_len + cc->integrity_tag_size, iv);
r = crypto_aead_decrypt(req);
}
if (r == -EBADMSG)
DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
(unsigned long long)le64_to_cpu(*sector));
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
return r;
}
static int crypt_convert_block_skcipher(struct crypt_config *cc,
struct convert_context *ctx,
struct skcipher_request *req,
unsigned int tag_offset)
{
struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct scatterlist *sg_in, *sg_out;
struct dm_crypt_request *dmreq;
unsigned int data_len = 1 << SECTOR_SHIFT;
u8 *iv, *org_iv, *tag_iv;
uint64_t *sector;
int r = 0;
dmreq = dmreq_of_req(cc, req);
dmreq->iv_sector = ctx->cc_sector; dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx; dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1); *org_tag_of_dmreq(cc, dmreq) = tag_offset;
sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
bv_out.bv_offset); iv = iv_of_dmreq(cc, dmreq);
org_iv = org_iv_of_dmreq(cc, dmreq);
tag_iv = iv_tag_from_dmreq(cc, dmreq);
sector = org_sector_of_dmreq(cc, dmreq);
*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
/* For skcipher we use only the first sg item */
sg_in = &dmreq->sg_in[0];
sg_out = &dmreq->sg_out[0];
bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); sg_init_table(sg_in, 1);
bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); sg_set_page(sg_in, bv_in.bv_page, data_len, bv_in.bv_offset);
sg_init_table(sg_out, 1);
sg_set_page(sg_out, bv_out.bv_page, data_len, bv_out.bv_offset);
if (cc->iv_gen_ops) { if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq); /* For READs use IV stored in integrity metadata */
if (r < 0) if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
return r; memcpy(org_iv, tag_iv, cc->integrity_iv_size);
} else {
r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
if (r < 0)
return r;
/* Store generated IV in integrity metadata */
if (cc->integrity_iv_size)
memcpy(tag_iv, org_iv, cc->integrity_iv_size);
}
/* Working copy of IV, to be modified in crypto API */
memcpy(iv, org_iv, cc->iv_size);
} }
skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, skcipher_request_set_crypt(req, sg_in, sg_out, data_len, iv);
1 << SECTOR_SHIFT, iv);
if (bio_data_dir(ctx->bio_in) == WRITE) if (bio_data_dir(ctx->bio_in) == WRITE)
r = crypto_skcipher_encrypt(req); r = crypto_skcipher_encrypt(req);
...@@ -881,7 +1185,10 @@ static int crypt_convert_block(struct crypt_config *cc, ...@@ -881,7 +1185,10 @@ static int crypt_convert_block(struct crypt_config *cc,
r = crypto_skcipher_decrypt(req); r = crypto_skcipher_decrypt(req);
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, iv, dmreq); r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
return r; return r;
} }
...@@ -889,27 +1196,53 @@ static int crypt_convert_block(struct crypt_config *cc, ...@@ -889,27 +1196,53 @@ static int crypt_convert_block(struct crypt_config *cc,
static void kcryptd_async_done(struct crypto_async_request *async_req, static void kcryptd_async_done(struct crypto_async_request *async_req,
int error); int error);
static void crypt_alloc_req(struct crypt_config *cc, static void crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx) struct convert_context *ctx)
{ {
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->req) if (!ctx->r.req)
ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
/*
* Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
* requests if driver request queue is full.
*/
skcipher_request_set_callback(ctx->r.req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}
static void crypt_alloc_req_aead(struct crypt_config *cc,
struct convert_context *ctx)
{
if (!ctx->r.req_aead)
ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
/* /*
* Use REQ_MAY_BACKLOG so a cipher driver internally backlogs * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
* requests if driver request queue is full. * requests if driver request queue is full.
*/ */
skcipher_request_set_callback(ctx->req, aead_request_set_callback(ctx->r.req_aead,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
kcryptd_async_done, dmreq_of_req(cc, ctx->req)); kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}
static void crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
if (crypt_integrity_mode(cc))
crypt_alloc_req_aead(cc, ctx);
else
crypt_alloc_req_skcipher(cc, ctx);
} }
static void crypt_free_req(struct crypt_config *cc, static void crypt_free_req_skcipher(struct crypt_config *cc,
struct skcipher_request *req, struct bio *base_bio) struct skcipher_request *req, struct bio *base_bio)
{ {
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
...@@ -917,12 +1250,30 @@ static void crypt_free_req(struct crypt_config *cc, ...@@ -917,12 +1250,30 @@ static void crypt_free_req(struct crypt_config *cc,
mempool_free(req, cc->req_pool); mempool_free(req, cc->req_pool);
} }
static void crypt_free_req_aead(struct crypt_config *cc,
struct aead_request *req, struct bio *base_bio)
{
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
if ((struct aead_request *)(io + 1) != req)
mempool_free(req, cc->req_pool);
}
static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
if (crypt_integrity_mode(cc))
crypt_free_req_aead(cc, req, base_bio);
else
crypt_free_req_skcipher(cc, req, base_bio);
}
/* /*
* Encrypt / decrypt data from one bio to another one (can be the same one) * Encrypt / decrypt data from one bio to another one (can be the same one)
*/ */
static int crypt_convert(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc,
struct convert_context *ctx) struct convert_context *ctx)
{ {
unsigned int tag_offset = 0;
int r; int r;
atomic_set(&ctx->cc_pending, 1); atomic_set(&ctx->cc_pending, 1);
...@@ -933,7 +1284,10 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -933,7 +1284,10 @@ static int crypt_convert(struct crypt_config *cc,
atomic_inc(&ctx->cc_pending); atomic_inc(&ctx->cc_pending);
r = crypt_convert_block(cc, ctx, ctx->req); if (crypt_integrity_mode(cc))
r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
else
r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
switch (r) { switch (r) {
/* /*
...@@ -949,8 +1303,9 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -949,8 +1303,9 @@ static int crypt_convert(struct crypt_config *cc,
* completion function kcryptd_async_done() will be called. * completion function kcryptd_async_done() will be called.
*/ */
case -EINPROGRESS: case -EINPROGRESS:
ctx->req = NULL; ctx->r.req = NULL;
ctx->cc_sector++; ctx->cc_sector++;
tag_offset++;
continue; continue;
/* /*
* The request was already processed (synchronously). * The request was already processed (synchronously).
...@@ -958,13 +1313,21 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -958,13 +1313,21 @@ static int crypt_convert(struct crypt_config *cc,
case 0: case 0:
atomic_dec(&ctx->cc_pending); atomic_dec(&ctx->cc_pending);
ctx->cc_sector++; ctx->cc_sector++;
tag_offset++;
cond_resched(); cond_resched();
continue; continue;
/*
/* There was an error while processing the request. */ * There was a data integrity error.
*/
case -EBADMSG:
atomic_dec(&ctx->cc_pending);
return -EILSEQ;
/*
* There was an error while processing the request.
*/
default: default:
atomic_dec(&ctx->cc_pending); atomic_dec(&ctx->cc_pending);
return r; return -EIO;
} }
} }
...@@ -1005,7 +1368,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) ...@@ -1005,7 +1368,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
if (!clone) if (!clone)
goto return_clone; goto out;
clone_init(io, clone); clone_init(io, clone);
...@@ -1027,7 +1390,13 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) ...@@ -1027,7 +1390,13 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
remaining_size -= len; remaining_size -= len;
} }
return_clone: /* Allocate space for integrity tags */
if (dm_crypt_integrity_io_alloc(io, clone)) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
clone = NULL;
}
out:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
mutex_unlock(&cc->bio_alloc_lock); mutex_unlock(&cc->bio_alloc_lock);
...@@ -1053,7 +1422,9 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, ...@@ -1053,7 +1422,9 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->base_bio = bio; io->base_bio = bio;
io->sector = sector; io->sector = sector;
io->error = 0; io->error = 0;
io->ctx.req = NULL; io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
atomic_set(&io->io_pending, 0); atomic_set(&io->io_pending, 0);
} }
...@@ -1075,8 +1446,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io) ...@@ -1075,8 +1446,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending)) if (!atomic_dec_and_test(&io->io_pending))
return; return;
if (io->ctx.req) if (io->ctx.r.req)
crypt_free_req(cc, io->ctx.req, base_bio); crypt_free_req(cc, io->ctx.r.req, base_bio);
if (unlikely(io->integrity_metadata_from_pool))
mempool_free(io->integrity_metadata, io->cc->tag_pool);
else
kfree(io->integrity_metadata);
base_bio->bi_error = error; base_bio->bi_error = error;
bio_endio(base_bio); bio_endio(base_bio);
...@@ -1156,6 +1532,12 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) ...@@ -1156,6 +1532,12 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
clone_init(io, clone); clone_init(io, clone);
clone->bi_iter.bi_sector = cc->start + io->sector; clone->bi_iter.bi_sector = cc->start + io->sector;
if (dm_crypt_integrity_io_alloc(io, clone)) {
crypt_dec_pending(io);
bio_put(clone);
return 1;
}
generic_make_request(clone); generic_make_request(clone);
return 0; return 0;
} }
...@@ -1314,8 +1696,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) ...@@ -1314,8 +1696,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_inc_pending(io); crypt_inc_pending(io);
r = crypt_convert(cc, &io->ctx); r = crypt_convert(cc, &io->ctx);
if (r) if (r < 0)
io->error = -EIO; io->error = r;
crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
/* Encryption was already finished, submit io now */ /* Encryption was already finished, submit io now */
...@@ -1345,7 +1727,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) ...@@ -1345,7 +1727,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
r = crypt_convert(cc, &io->ctx); r = crypt_convert(cc, &io->ctx);
if (r < 0) if (r < 0)
io->error = -EIO; io->error = r;
if (atomic_dec_and_test(&io->ctx.cc_pending)) if (atomic_dec_and_test(&io->ctx.cc_pending))
kcryptd_crypt_read_done(io); kcryptd_crypt_read_done(io);
...@@ -1372,9 +1754,13 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, ...@@ -1372,9 +1754,13 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
} }
if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error < 0) if (error == -EBADMSG) {
DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
io->error = -EILSEQ;
} else if (error < 0)
io->error = -EIO; io->error = -EIO;
crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
...@@ -1430,37 +1816,59 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size) ...@@ -1430,37 +1816,59 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
return 0; return 0;
} }
static void crypt_free_tfms(struct crypt_config *cc) static void crypt_free_tfms_aead(struct crypt_config *cc)
{
if (!cc->cipher_tfm.tfms_aead)
return;
if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
cc->cipher_tfm.tfms_aead[0] = NULL;
}
kfree(cc->cipher_tfm.tfms_aead);
cc->cipher_tfm.tfms_aead = NULL;
}
static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{ {
unsigned i; unsigned i;
if (!cc->tfms) if (!cc->cipher_tfm.tfms)
return; return;
for (i = 0; i < cc->tfms_count; i++) for (i = 0; i < cc->tfms_count; i++)
if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
crypto_free_skcipher(cc->tfms[i]); crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
cc->tfms[i] = NULL; cc->cipher_tfm.tfms[i] = NULL;
} }
kfree(cc->tfms); kfree(cc->cipher_tfm.tfms);
cc->tfms = NULL; cc->cipher_tfm.tfms = NULL;
} }
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) static void crypt_free_tfms(struct crypt_config *cc)
{
if (crypt_integrity_mode(cc))
crypt_free_tfms_aead(cc);
else
crypt_free_tfms_skcipher(cc);
}
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{ {
unsigned i; unsigned i;
int err; int err;
cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *), cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
GFP_KERNEL); sizeof(struct crypto_skcipher *), GFP_KERNEL);
if (!cc->tfms) if (!cc->cipher_tfm.tfms)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < cc->tfms_count; i++) { for (i = 0; i < cc->tfms_count; i++) {
cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
if (IS_ERR(cc->tfms[i])) { if (IS_ERR(cc->cipher_tfm.tfms[i])) {
err = PTR_ERR(cc->tfms[i]); err = PTR_ERR(cc->cipher_tfm.tfms[i]);
crypt_free_tfms(cc); crypt_free_tfms(cc);
return err; return err;
} }
...@@ -1469,22 +1877,111 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) ...@@ -1469,22 +1877,111 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return 0; return 0;
} }
static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
char *authenc = NULL;
int err;
cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
if (!cc->cipher_tfm.tfms)
return -ENOMEM;
/* Compose AEAD cipher with authenc(authenticator,cipher) structure */
if (crypt_integrity_hmac(cc)) {
authenc = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
if (!authenc)
return -ENOMEM;
err = snprintf(authenc, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", cc->cipher_auth, ciphermode);
if (err < 0) {
kzfree(authenc);
return err;
}
ciphermode = authenc;
}
cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
crypt_free_tfms(cc);
return err;
}
kzfree(authenc);
return 0;
}
static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
if (crypt_integrity_mode(cc))
return crypt_alloc_tfms_aead(cc, ciphermode);
else
return crypt_alloc_tfms_skcipher(cc, ciphermode);
}
static unsigned crypt_subkey_size(struct crypt_config *cc)
{
return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}
static unsigned crypt_authenckey_size(struct crypt_config *cc)
{
return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
/*
* If AEAD is composed like authenc(hmac(sha256),xts(aes)),
* the key must be for some reason in special format.
 * This function converts cc->key to this special format.
*/
static void crypt_copy_authenckey(char *p, const void *key,
unsigned enckeylen, unsigned authkeylen)
{
struct crypto_authenc_key_param *param;
struct rtattr *rta;
rta = (struct rtattr *)p;
param = RTA_DATA(rta);
param->enckeylen = cpu_to_be32(enckeylen);
rta->rta_len = RTA_LENGTH(sizeof(*param));
rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
p += RTA_SPACE(sizeof(*param));
memcpy(p, key + enckeylen, authkeylen);
p += authkeylen;
memcpy(p, key, enckeylen);
}
static int crypt_setkey(struct crypt_config *cc) static int crypt_setkey(struct crypt_config *cc)
{ {
unsigned subkey_size; unsigned subkey_size;
int err = 0, i, r; int err = 0, i, r;
/* Ignore extra keys (which are used for IV etc) */ /* Ignore extra keys (which are used for IV etc) */
subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); subkey_size = crypt_subkey_size(cc);
if (crypt_integrity_hmac(cc))
crypt_copy_authenckey(cc->authenc_key, cc->key,
subkey_size - cc->key_mac_size,
cc->key_mac_size);
for (i = 0; i < cc->tfms_count; i++) { for (i = 0; i < cc->tfms_count; i++) {
r = crypto_skcipher_setkey(cc->tfms[i], if (crypt_integrity_aead(cc))
cc->key + (i * subkey_size), r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
subkey_size); cc->key + (i * subkey_size),
subkey_size);
else if (crypt_integrity_hmac(cc))
r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
cc->authenc_key, crypt_authenckey_size(cc));
else
r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
cc->key + (i * subkey_size),
subkey_size);
if (r) if (r)
err = r; err = r;
} }
if (crypt_integrity_hmac(cc))
memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
return err; return err;
} }
...@@ -1681,6 +2178,7 @@ static void crypt_dtr(struct dm_target *ti) ...@@ -1681,6 +2178,7 @@ static void crypt_dtr(struct dm_target *ti)
mempool_destroy(cc->page_pool); mempool_destroy(cc->page_pool);
mempool_destroy(cc->req_pool); mempool_destroy(cc->req_pool);
mempool_destroy(cc->tag_pool);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc); cc->iv_gen_ops->dtr(cc);
...@@ -1691,6 +2189,8 @@ static void crypt_dtr(struct dm_target *ti) ...@@ -1691,6 +2189,8 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher); kzfree(cc->cipher);
kzfree(cc->cipher_string); kzfree(cc->cipher_string);
kzfree(cc->key_string); kzfree(cc->key_string);
kzfree(cc->cipher_auth);
kzfree(cc->authenc_key);
/* Must zero key material before freeing */ /* Must zero key material before freeing */
kzfree(cc); kzfree(cc);
...@@ -1731,7 +2231,6 @@ static int crypt_ctr_cipher(struct dm_target *ti, ...@@ -1731,7 +2231,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
return -EINVAL; return -EINVAL;
} }
cc->key_parts = cc->tfms_count; cc->key_parts = cc->tfms_count;
cc->key_extra_size = 0;
cc->cipher = kstrdup(cipher, GFP_KERNEL); cc->cipher = kstrdup(cipher, GFP_KERNEL);
if (!cc->cipher) if (!cc->cipher)
...@@ -1777,7 +2276,20 @@ static int crypt_ctr_cipher(struct dm_target *ti, ...@@ -1777,7 +2276,20 @@ static int crypt_ctr_cipher(struct dm_target *ti,
} }
/* Initialize IV */ /* Initialize IV */
cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); if (crypt_integrity_mode(cc))
cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
else
cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
if (crypt_integrity_hmac(cc)) {
cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
if (!cc->authenc_key) {
ret = -ENOMEM;
ti->error = "Error allocating authenc key space";
goto bad;
}
}
if (cc->iv_size) if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */ /* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(cc->iv_size, cc->iv_size = max(cc->iv_size,
...@@ -1816,6 +2328,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, ...@@ -1816,6 +2328,10 @@ static int crypt_ctr_cipher(struct dm_target *ti,
cc->iv_gen_ops = &crypt_iv_tcw_ops; cc->iv_gen_ops = &crypt_iv_tcw_ops;
cc->key_parts += 2; /* IV + whitening */ cc->key_parts += 2; /* IV + whitening */
cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
} else if (strcmp(ivmode, "random") == 0) {
cc->iv_gen_ops = &crypt_iv_random_ops;
/* Need storage space in integrity fields. */
cc->integrity_iv_size = cc->iv_size;
} else { } else {
ret = -EINVAL; ret = -EINVAL;
ti->error = "Invalid IV mode"; ti->error = "Invalid IV mode";
...@@ -1857,6 +2373,75 @@ static int crypt_ctr_cipher(struct dm_target *ti, ...@@ -1857,6 +2373,75 @@ static int crypt_ctr_cipher(struct dm_target *ti,
return -ENOMEM; return -ENOMEM;
} }
static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc = ti->private;
struct dm_arg_set as;
static struct dm_arg _args[] = {
{0, 3, "Invalid number of feature args"},
};
unsigned int opt_params, val;
const char *opt_string, *sval;
int ret;
/* Optional parameters */
as.argc = argc;
as.argv = argv;
ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
if (ret)
return ret;
while (opt_params--) {
opt_string = dm_shift_arg(&as);
if (!opt_string) {
ti->error = "Not enough feature arguments";
return -EINVAL;
}
if (!strcasecmp(opt_string, "allow_discards"))
ti->num_discard_bios = 1;
else if (!strcasecmp(opt_string, "same_cpu_crypt"))
set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
if (val == 0 || val > MAX_TAG_SIZE) {
ti->error = "Invalid integrity arguments";
return -EINVAL;
}
cc->on_disk_tag_size = val;
sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
} else if (!strncasecmp(sval, "hmac(", strlen("hmac("))) {
struct crypto_ahash *hmac_tfm = crypto_alloc_ahash(sval, 0, 0);
if (IS_ERR(hmac_tfm)) {
ti->error = "Error initializing HMAC integrity hash.";
return PTR_ERR(hmac_tfm);
}
cc->key_mac_size = crypto_ahash_digestsize(hmac_tfm);
crypto_free_ahash(hmac_tfm);
set_bit(CRYPT_MODE_INTEGRITY_HMAC, &cc->cipher_flags);
} else if (strcasecmp(sval, "none")) {
ti->error = "Unknown integrity profile";
return -EINVAL;
}
cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
if (!cc->cipher_auth)
return -ENOMEM;
} else {
ti->error = "Invalid feature arguments";
return -EINVAL;
}
}
return 0;
}
/* /*
* Construct an encryption mapping: * Construct an encryption mapping:
* <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start> * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
...@@ -1865,18 +2450,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -1865,18 +2450,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{ {
struct crypt_config *cc; struct crypt_config *cc;
int key_size; int key_size;
unsigned int opt_params; unsigned int align_mask;
unsigned long long tmpll; unsigned long long tmpll;
int ret; int ret;
size_t iv_size_padding; size_t iv_size_padding, additional_req_size;
struct dm_arg_set as;
const char *opt_string;
char dummy; char dummy;
static struct dm_arg _args[] = {
{0, 3, "Invalid number of feature args"},
};
if (argc < 5) { if (argc < 5) {
ti->error = "Not enough arguments"; ti->error = "Not enough arguments";
return -EINVAL; return -EINVAL;
...@@ -1896,38 +2475,59 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -1896,38 +2475,59 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->key_size = key_size; cc->key_size = key_size;
ti->private = cc; ti->private = cc;
/* Optional parameters need to be read before cipher constructor */
if (argc > 5) {
ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
if (ret)
goto bad;
}
ret = crypt_ctr_cipher(ti, argv[0], argv[1]); ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
if (ret < 0) if (ret < 0)
goto bad; goto bad;
cc->dmreq_start = sizeof(struct skcipher_request); if (crypt_integrity_mode(cc)) {
cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc)); cc->dmreq_start = sizeof(struct aead_request);
cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
} else {
cc->dmreq_start = sizeof(struct skcipher_request);
cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
align_mask = crypto_skcipher_alignmask(any_tfm(cc));
}
cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { if (align_mask < CRYPTO_MINALIGN) {
/* Allocate the padding exactly */ /* Allocate the padding exactly */
iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
& crypto_skcipher_alignmask(any_tfm(cc)); & align_mask;
} else { } else {
/* /*
* If the cipher requires greater alignment than kmalloc * If the cipher requires greater alignment than kmalloc
* alignment, we don't know the exact position of the * alignment, we don't know the exact position of the
* initialization vector. We must assume worst case. * initialization vector. We must assume worst case.
*/ */
iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc)); iv_size_padding = align_mask;
} }
ret = -ENOMEM; ret = -ENOMEM;
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
additional_req_size = sizeof(struct dm_crypt_request) +
iv_size_padding + cc->iv_size +
cc->iv_size +
sizeof(uint64_t) +
sizeof(unsigned int);
cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
if (!cc->req_pool) { if (!cc->req_pool) {
ti->error = "Cannot allocate crypt request mempool"; ti->error = "Cannot allocate crypt request mempool";
goto bad; goto bad;
} }
cc->per_bio_data_size = ti->per_io_data_size = cc->per_bio_data_size = ti->per_io_data_size =
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
ARCH_KMALLOC_MINALIGN); ARCH_KMALLOC_MINALIGN);
cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0); cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
...@@ -1964,39 +2564,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -1964,39 +2564,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
} }
cc->start = tmpll; cc->start = tmpll;
argv += 5; if (crypt_integrity_mode(cc) || cc->integrity_iv_size) {
argc -= 5; ret = crypt_integrity_ctr(cc, ti);
/* Optional parameters */
if (argc) {
as.argc = argc;
as.argv = argv;
ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
if (ret) if (ret)
goto bad; goto bad;
ret = -EINVAL; cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
while (opt_params--) { if (!cc->tag_pool_max_sectors)
opt_string = dm_shift_arg(&as); cc->tag_pool_max_sectors = 1;
if (!opt_string) {
ti->error = "Not enough feature arguments";
goto bad;
}
if (!strcasecmp(opt_string, "allow_discards"))
ti->num_discard_bios = 1;
else if (!strcasecmp(opt_string, "same_cpu_crypt"))
set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); cc->tag_pool_max_sectors * cc->on_disk_tag_size);
if (!cc->tag_pool) {
else { ti->error = "Cannot allocate integrity tags mempool";
ti->error = "Invalid feature arguments"; goto bad;
goto bad;
}
} }
} }
...@@ -2062,12 +2643,29 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) ...@@ -2062,12 +2643,29 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
* Check if bio is too large, split as needed. * Check if bio is too large, split as needed.
*/ */
if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
bio_data_dir(bio) == WRITE) (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
io = dm_per_bio_data(bio, cc->per_bio_data_size); io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
io->ctx.req = (struct skcipher_request *)(io + 1);
if (cc->on_disk_tag_size) {
unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);
if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
unlikely(!(io->integrity_metadata = kmalloc(tag_len,
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
io->integrity_metadata_from_pool = true;
}
}
if (crypt_integrity_mode(cc))
io->ctx.r.req_aead = (struct aead_request *)(io + 1);
else
io->ctx.r.req = (struct skcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) { if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT)) if (kcryptd_io_read(io, GFP_NOWAIT))
...@@ -2108,6 +2706,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, ...@@ -2108,6 +2706,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
num_feature_args += !!ti->num_discard_bios; num_feature_args += !!ti->num_discard_bios;
num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
if (cc->on_disk_tag_size)
num_feature_args++;
if (num_feature_args) { if (num_feature_args) {
DMEMIT(" %d", num_feature_args); DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios) if (ti->num_discard_bios)
...@@ -2116,6 +2716,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type, ...@@ -2116,6 +2716,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" same_cpu_crypt"); DMEMIT(" same_cpu_crypt");
if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
DMEMIT(" submit_from_crypt_cpus"); DMEMIT(" submit_from_crypt_cpus");
if (cc->on_disk_tag_size)
DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
} }
break; break;
...@@ -2216,7 +2818,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -2216,7 +2818,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = { static struct target_type crypt_target = {
.name = "crypt", .name = "crypt",
.version = {1, 15, 0}, .version = {1, 16, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = crypt_ctr, .ctr = crypt_ctr,
.dtr = crypt_dtr, .dtr = crypt_dtr,