Commit 6d39a124 authored by Kees Cook, committed by Herbert Xu

dm: Remove VLA usage from hashes

In the quest to remove all stack VLA usage from the kernel[1], this uses
the new HASH_MAX_DIGESTSIZE from the crypto layer as a fixed upper bound
for the on-stack digest buffers, replacing the variable-length arrays
with bounded allocations and runtime size checks.

[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com

Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a1b22a5f
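
For context, a minimal userspace sketch of the pattern this commit applies: a variable-length stack array is replaced by a buffer sized to a compile-time upper bound, plus a runtime check that the requested size actually fits. MY_MAX_DIGESTSIZE, fill_digest() and copy_digest_bounded() below are illustrative stand-ins invented for this sketch, not APIs from the patch.

/*
 * Sketch only: shows the VLA -> fixed-upper-bound-buffer pattern.
 * MY_MAX_DIGESTSIZE is a stand-in for HASH_MAX_DIGESTSIZE.
 */
#include <stdio.h>
#include <string.h>

#define MY_MAX_DIGESTSIZE 64

/* Pretend hash finalization: writes 'size' bytes into 'out'. */
static void fill_digest(unsigned char *out, size_t size)
{
	memset(out, 0xab, size);
}

/* Before: "unsigned char digest[size];" made stack usage depend on 'size'. */
static int copy_digest_bounded(unsigned char *result, size_t result_len,
			       size_t size)
{
	unsigned char digest[MY_MAX_DIGESTSIZE];

	/* Refuse sizes above the compile-time bound instead of overflowing. */
	if (size > sizeof(digest)) {
		fprintf(stderr, "digest size %zu exceeds bound %zu\n",
			size, sizeof(digest));
		return -1;
	}

	fill_digest(digest, size);
	memcpy(result, digest, size < result_len ? size : result_len);
	return 0;
}

int main(void)
{
	unsigned char result[32];

	if (copy_digest_bounded(result, sizeof(result), 32) == 0)
		printf("ok: digest copied within fixed bound\n");
	return 0;
}

The diff below follows the same shape: HASH_MAX_DIGESTSIZE (or its max() with the tag size) bounds each on-stack buffer, and a WARN_ON() plus error path replaces silent reliance on the runtime size.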
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -559,7 +559,12 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
 		}
 		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
 	} else {
-		__u8 digest[size];
+		__u8 digest[HASH_MAX_DIGESTSIZE];
+
+		if (WARN_ON(size > sizeof(digest))) {
+			dm_integrity_io_error(ic, "digest_size", -EINVAL);
+			goto err;
+		}
 		r = crypto_shash_final(desc, digest);
 		if (unlikely(r)) {
 			dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -1324,7 +1329,7 @@ static void integrity_metadata(struct work_struct *w)
 		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 		char *checksums;
 		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
-		char checksums_onstack[ic->tag_size + extra_space];
+		char checksums_onstack[HASH_MAX_DIGESTSIZE];
 		unsigned sectors_to_process = dio->range.n_sectors;
 		sector_t sector = dio->range.logical_sector;
 
@@ -1333,8 +1338,14 @@ static void integrity_metadata(struct work_struct *w)
 		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
 				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
-		if (!checksums)
+		if (!checksums) {
 			checksums = checksums_onstack;
+			if (WARN_ON(extra_space &&
+				    digest_size > sizeof(checksums_onstack))) {
+				r = -EINVAL;
+				goto error;
+			}
+		}
 
 		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
 			unsigned pos;
@@ -1546,7 +1557,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
 				} while (++s < ic->sectors_per_block);
 #ifdef INTERNAL_VERIFY
 				if (ic->internal_hash) {
-					char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+					char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
 
 					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
 					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -1596,7 +1607,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
 				if (ic->internal_hash) {
 					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
 					if (unlikely(digest_size > ic->tag_size)) {
-						char checksums_onstack[digest_size];
+						char checksums_onstack[HASH_MAX_DIGESTSIZE];
 						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
 						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
 					} else
@@ -2023,7 +2034,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
 					    unlikely(from_replay) &&
 #endif
 					    ic->internal_hash) {
-						char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+						char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
 
 						integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
 									  (char *)access_journal_data(ic, i, l), test_tag);
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -212,12 +212,15 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
 	struct dm_verity_fec_io *fio = fec_io(io);
 	u64 block, ileaved;
 	u8 *bbuf, *rs_block;
-	u8 want_digest[v->digest_size];
+	u8 want_digest[HASH_MAX_DIGESTSIZE];
 	unsigned n, k;
 
 	if (neras)
 		*neras = 0;
 
+	if (WARN_ON(v->digest_size > sizeof(want_digest)))
+		return -EINVAL;
+
 	/*
 	 * read each of the rsn data blocks that are part of the RS block, and
 	 * interleave contents to available bufs