Commit e7768e65 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-6.8/dm-fixes-2' of...

Merge tag 'for-6.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Stable fixes for 3 DM targets (integrity, verity and crypt) to
   address systemic failure that can occur if user provided pages map to
   the same block.

 - Fix DM crypt to not allow modifying data that is being encrypted for
   authenticated encryption.

 - Fix DM crypt and verity targets to align their respective bvec_iter
   struct members to avoid the need for byte level access (due to
   __packed attribute) that is costly on some arches (like RISC).

* tag 'for-6.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm-crypt, dm-integrity, dm-verity: bump target version
  dm-verity, dm-crypt: align "struct bvec_iter" correctly
  dm-crypt: recheck the integrity tag after a failure
  dm-crypt: don't modify the data when using authenticated encryption
  dm-verity: recheck the hash after a failure
  dm-integrity: recheck the integrity tag after a failure
parents 06b7ef70 0e0c50e8
......@@ -53,15 +53,17 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
struct bio *bio_out;
struct bvec_iter iter_in;
struct bio *bio_out;
struct bvec_iter iter_out;
u64 cc_sector;
atomic_t cc_pending;
u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
bool aead_recheck;
bool aead_failed;
};
......@@ -82,6 +84,8 @@ struct dm_crypt_io {
blk_status_t error;
sector_t sector;
struct bvec_iter saved_bi_iter;
struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
......@@ -1370,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
if (r == -EBADMSG) {
sector_t s = le64_to_cpu(*sector);
DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
ctx->aead_failed = true;
if (ctx->aead_recheck) {
DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
}
}
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
......@@ -1757,6 +1764,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
io->ctx.aead_recheck = false;
io->ctx.aead_failed = false;
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
......@@ -1768,6 +1777,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
static void kcryptd_queue_read(struct dm_crypt_io *io);
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
......@@ -1781,6 +1792,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending))
return;
if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
io->ctx.aead_recheck = true;
io->ctx.aead_failed = false;
io->error = 0;
kcryptd_queue_read(io);
return;
}
if (io->ctx.r.req)
crypt_free_req(cc, io->ctx.r.req, base_bio);
......@@ -1816,15 +1836,19 @@ static void crypt_endio(struct bio *clone)
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned int rw = bio_data_dir(clone);
blk_status_t error;
blk_status_t error = clone->bi_status;
if (io->ctx.aead_recheck && !error) {
kcryptd_queue_crypt(io);
return;
}
/*
* free the processed pages
*/
if (rw == WRITE)
if (rw == WRITE || io->ctx.aead_recheck)
crypt_free_buffer_pages(cc, clone);
error = clone->bi_status;
bio_put(clone);
if (rw == READ && !error) {
......@@ -1845,6 +1869,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
struct crypt_config *cc = io->cc;
struct bio *clone;
if (io->ctx.aead_recheck) {
if (!(gfp & __GFP_DIRECT_RECLAIM))
return 1;
crypt_inc_pending(io);
clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
if (unlikely(!clone)) {
crypt_dec_pending(io);
return 1;
}
clone->bi_iter.bi_sector = cc->start + io->sector;
crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
io->saved_bi_iter = clone->bi_iter;
dm_submit_bio_remap(io->base_bio, clone);
return 0;
}
/*
* We need the original biovec array in order to decrypt the whole bio
* data *afterwards* -- thanks to immutable biovecs we don't need to
......@@ -2071,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.bio_out = clone;
io->ctx.iter_out = clone->bi_iter;
if (crypt_integrity_aead(cc)) {
bio_copy_data(clone, io->base_bio);
io->ctx.bio_in = clone;
io->ctx.iter_in = clone->bi_iter;
}
sector += bio_sectors(clone);
crypt_inc_pending(io);
......@@ -2107,6 +2153,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
if (io->ctx.aead_recheck) {
if (!io->error) {
io->ctx.bio_in->bi_iter = io->saved_bi_iter;
bio_copy_data(io->base_bio, io->ctx.bio_in);
}
crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
bio_put(io->ctx.bio_in);
}
crypt_dec_pending(io);
}
......@@ -2136,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
io->sector);
if (io->ctx.aead_recheck) {
io->ctx.cc_sector = io->sector + cc->iv_offset;
r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
} else {
crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
io->sector);
r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
r = crypt_convert(cc, &io->ctx,
test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
}
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
......@@ -2182,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error)
if (error == -EBADMSG) {
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
ctx->aead_failed = true;
if (ctx->aead_recheck) {
DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
ctx->bio_in->bi_bdev, s);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
ctx->bio_in, s, 0);
}
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
io->error = BLK_STS_IOERR;
......@@ -3110,7 +3173,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
} else if (strcasecmp(sval, "none")) {
} else if (strcasecmp(sval, "none")) {
ti->error = "Unknown integrity profile";
return -EINVAL;
}
......@@ -3639,7 +3702,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
.version = {1, 24, 0},
.version = {1, 25, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
......
......@@ -278,6 +278,8 @@ struct dm_integrity_c {
atomic64_t number_of_mismatches;
mempool_t recheck_pool;
struct notifier_block reboot_notifier;
};
......@@ -1689,6 +1691,79 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
get_random_bytes(result, ic->tag_size);
}
/*
 * Slow-path re-verification after a bulk tag-compare failure in
 * integrity_metadata(): re-read the data covered by this request from the
 * backing device one block at a time with synchronous dm_io, recompute each
 * block's checksum and compare it against the stored tag again.  A block
 * that now verifies is copied into the caller's bio pages; a block that
 * still mismatches is counted, audit-logged and reported as -EILSEQ via
 * dio->bi_status.  Any dm_io or tag-I/O error is likewise propagated
 * through dio->bi_status.
 */
static void integrity_recheck(struct dm_integrity_io *dio)
{
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
struct dm_integrity_c *ic = dio->ic;
struct bvec_iter iter;
struct bio_vec bv;
sector_t sector, logical_sector, area, offset;
char checksum_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
struct page *page;
void *buffer;

/* Recompute metadata block/offset and data sector for the request start. */
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
&dio->metadata_offset);
sector = get_data_sector(ic, area, offset);
logical_sector = dio->range.logical_sector;

/* Bounce page for re-reads; GFP_NOIO mempool_alloc does not fail. */
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
buffer = page_to_virt(page);

/*
 * Walk the payload segment by segment using the iterator saved in
 * bio_details (the live bi_iter has already been advanced).
 */
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos = 0;
do {
char *mem;
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;

/* Synchronous read of one data block into the bounce buffer. */
io_req.bi_opf = REQ_OP_READ;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = buffer;
io_req.notify.fn = NULL;
io_req.client = ic->io;
io_loc.bdev = ic->dev->bdev;
io_loc.sector = sector;
io_loc.count = ic->sectors_per_block;
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) {
dio->bi_status = errno_to_blk_status(r);
goto free_ret;
}

/* Re-verify this single block against its stored tag. */
integrity_sector_checksum(ic, logical_sector, buffer,
checksum_onstack);
r = dm_integrity_rw_tag(ic, checksum_onstack, &dio->metadata_block,
&dio->metadata_offset, ic->tag_size, TAG_CMP);
if (r) {
if (r > 0) {
/* Still mismatching on re-read: genuine corruption. */
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
bio->bi_bdev, logical_sector);
atomic64_inc(&ic->number_of_mismatches);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
bio, logical_sector, 0);
r = -EILSEQ;
}
dio->bi_status = errno_to_blk_status(r);
goto free_ret;
}

/* Block verified: copy the re-read data into the destination bvec. */
mem = bvec_kmap_local(&bv);
memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
kunmap_local(mem);

pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
logical_sector += ic->sectors_per_block;
} while (pos < bv.bv_len);
}
free_ret:
mempool_free(page, &ic->recheck_pool);
}
static void integrity_metadata(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
......@@ -1776,15 +1851,8 @@ static void integrity_metadata(struct work_struct *w)
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
sector_t s;
s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
bio->bi_bdev, s);
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
bio, s, 0);
integrity_recheck(dio);
goto skip_io;
}
if (likely(checksums != checksums_onstack))
kfree(checksums);
......@@ -4261,6 +4329,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
goto bad;
}
r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
if (r) {
ti->error = "Cannot allocate mempool";
goto bad;
}
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
if (!ic->metadata_wq) {
......@@ -4609,6 +4683,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
......@@ -4661,7 +4736,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
.version = {1, 10, 0},
.version = {1, 11, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
......
......@@ -482,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
return 0;
}
/*
 * verity_for_bv_block() callback used by verity_recheck(): copy the next
 * @len bytes of the re-read, re-verified block (io->recheck_buffer) into
 * the destination segment @data, advancing the cursor as we go.
 * Always returns 0 so iteration continues over the whole block.
 */
static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
			       u8 *data, size_t len)
{
	char *src = io->recheck_buffer;

	memcpy(data, src, len);
	io->recheck_buffer = src + len;

	return 0;
}
/*
 * Slow-path re-verification after a hash mismatch: re-read the data block
 * directly from the data device with a synchronous dm_io, hash it again and
 * compare against the wanted digest.  If it now matches, the freshly read
 * data is copied back over the original bio payload via
 * verity_recheck_copy(), starting at the saved iterator @start.
 *
 * NOTE(review): uses a single mempool page as the bounce buffer, so this
 * assumes 1 << data_dev_block_bits <= PAGE_SIZE — confirm against ctr limits.
 *
 * Returns 0 if the block verified on re-read, -EIO if it still mismatches,
 * or a dm_io/verity_hash error code.
 */
static int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter start, sector_t cur_block)
{
struct page *page;
void *buffer;
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;

/* Bounce page for the re-read; GFP_NOIO mempool_alloc does not fail. */
page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
buffer = page_to_virt(page);

/* Synchronous read of one data block into the bounce buffer. */
io_req.bi_opf = REQ_OP_READ;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = buffer;
io_req.notify.fn = NULL;
io_req.client = v->io;
io_loc.bdev = v->data_dev->bdev;
io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r))
goto free_ret;

/* Hash the re-read data and compare with the expected digest. */
r = verity_hash(v, verity_io_hash_req(v, io), buffer,
1 << v->data_dev_block_bits,
verity_io_real_digest(v, io), true);
if (unlikely(r))
goto free_ret;

if (memcmp(verity_io_real_digest(v, io),
verity_io_want_digest(v, io), v->digest_size)) {
/* Still wrong on re-read: report as an I/O integrity failure. */
r = -EIO;
goto free_ret;
}

/* Verified: overwrite the original bio payload with the good data. */
io->recheck_buffer = buffer;
r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
if (unlikely(r))
goto free_ret;

r = 0;
free_ret:
mempool_free(page, &v->recheck_pool);

return r;
}
static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len)
{
......@@ -508,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
#endif
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct crypto_wait wait;
......@@ -561,10 +616,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
#if defined(CONFIG_DM_VERITY_FEC)
if (verity_fec_is_enabled(v))
start = *iter;
#endif
start = *iter;
r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
......@@ -586,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io)
* tasklet since it may sleep, so fallback to work-queue.
*/
return -EAGAIN;
} else if (verity_recheck(v, io, start, cur_block) == 0) {
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
......@@ -941,6 +997,10 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
mempool_exit(&v->recheck_pool);
if (v->io)
dm_io_client_destroy(v->io);
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
......@@ -1379,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->hash_blocks = hash_position;
r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
if (unlikely(r)) {
ti->error = "Cannot allocate mempool";
goto bad;
}
v->io = dm_io_client_create();
if (IS_ERR(v->io)) {
r = PTR_ERR(v->io);
v->io = NULL;
ti->error = "Cannot allocate dm io";
goto bad;
}
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL,
......@@ -1486,7 +1560,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
.version = {1, 9, 0},
.version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
......
......@@ -11,6 +11,7 @@
#ifndef DM_VERITY_H
#define DM_VERITY_H
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
#include <linux/interrupt.h>
......@@ -68,6 +69,9 @@ struct dm_verity {
unsigned long *validated_blocks; /* bitset blocks validated */
char *signature_key_desc; /* signature keyring reference */
struct dm_io_client *io;
mempool_t recheck_pool;
};
struct dm_verity_io {
......@@ -76,14 +80,16 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
struct bvec_iter iter;
sector_t block;
unsigned int n_blocks;
bool in_tasklet;
struct bvec_iter iter;
struct work_struct work;
char *recheck_buffer;
/*
* Three variably-size fields follow this struct:
*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment