Commit 6897cea7 authored by Linus Torvalds

Merge tag 'for-6.8/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix the DM ioctl interface to avoid INT_MAX overflow warnings from
   kvmalloc by limiting the number of targets and the parameter size
   area (see the sketch after this list).

 - Fix DM stats to avoid INT_MAX overflow warnings from kvmalloc by
   limiting the number of entries supported.

 - Fix DM writecache to support mapping devices larger than 1 TiB by
   switching from kvmalloc_array to vmalloc_array, which avoids the
   INT_MAX overflow in kvmalloc_node and the associated warnings.

 - Remove the (ab)use of tasklets from both the DM crypt and verity
   targets. They will be converted to use BH workqueues in the future.

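All three kvmalloc-related fixes share one root cause: the kvmalloc family
refuses any request larger than INT_MAX (and WARNs unless __GFP_NOWARN is
set), so a size that userspace can influence has to be bounded before it
reaches the allocator. Below is a minimal userspace sketch of that guard;
bounded_alloc() is a hypothetical stand-in, not a kernel API.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Mirrors (does not copy) the size guard in the kernel's kvmalloc_node():
     * anything above INT_MAX is refused outright, and in the kernel it also
     * trips a WARN unless __GFP_NOWARN was passed.
     */
    static void *bounded_alloc(size_t size)
    {
        if (size > INT_MAX) {
            fprintf(stderr, "refusing %zu-byte allocation\n", size);
            return NULL;
        }
        return malloc(size);
    }

    int main(void)
    {
        /* 1 GiB, the new DM ioctl ceiling: passes the guard */
        void *ok = bounded_alloc((size_t)1048576 * 1024);
        /* one byte past INT_MAX is refused before the allocator runs */
        void *bad = bounded_alloc((size_t)INT_MAX + 1);

        free(ok);
        return bad == NULL ? 0 : 1;
    }
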
* tag 'for-6.8/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm-crypt, dm-verity: disable tasklets
  dm writecache: allow allocations larger than 2GiB
  dm stats: limit the number of entries
  dm: limit the number of targets and parameter size area
parents 03503275 0a9bab39

drivers/md/dm-core.h
@@ -22,6 +22,8 @@
 #include "dm-ima.h"
 
 #define DM_RESERVED_MAX_IOS		1024
+#define DM_MAX_TARGETS			1048576
+#define DM_MAX_TARGET_PARAMS		1024
 
 struct dm_io;

drivers/md/dm-crypt.c
@@ -73,10 +73,8 @@ struct dm_crypt_io {
 	struct bio *base_bio;
 	u8 *integrity_metadata;
 	bool integrity_metadata_from_pool:1;
-	bool in_tasklet:1;
 
 	struct work_struct work;
-	struct tasklet_struct tasklet;
 
 	struct convert_context ctx;
@@ -1762,7 +1760,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->ctx.r.req = NULL;
 	io->integrity_metadata = NULL;
 	io->integrity_metadata_from_pool = false;
-	io->in_tasklet = false;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1771,13 +1768,6 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 	atomic_inc(&io->io_pending);
 }
 
-static void kcryptd_io_bio_endio(struct work_struct *work)
-{
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-	bio_endio(io->base_bio);
-}
-
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1801,20 +1791,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	base_bio->bi_status = error;
 
-	/*
-	 * If we are running this function from our tasklet,
-	 * we can't call bio_endio() here, because it will call
-	 * clone_endio() from dm.c, which in turn will
-	 * free the current struct dm_crypt_io structure with
-	 * our tasklet. In this case we need to delay bio_endio()
-	 * execution to after the tasklet is done and dequeued.
-	 */
-	if (io->in_tasklet) {
-		INIT_WORK(&io->work, kcryptd_io_bio_endio);
-		queue_work(cc->io_queue, &io->work);
-		return;
-	}
-
 	bio_endio(base_bio);
 }
@@ -2246,11 +2222,6 @@ static void kcryptd_crypt(struct work_struct *work)
 		kcryptd_crypt_write_convert(io);
 }
 
-static void kcryptd_crypt_tasklet(unsigned long work)
-{
-	kcryptd_crypt((struct work_struct *)work);
-}
-
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
@@ -2262,16 +2233,11 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
 		 * it is being executed with irqs disabled.
 		 */
-		if (in_hardirq() || irqs_disabled()) {
-			io->in_tasklet = true;
-			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
-			tasklet_schedule(&io->tasklet);
-			return;
-		}
-
-		kcryptd_crypt(&io->work);
-		return;
+		if (!(in_hardirq() || irqs_disabled())) {
+			kcryptd_crypt(&io->work);
+			return;
+		}
 	}
 
 	INIT_WORK(&io->work, kcryptd_crypt);
 	queue_work(cc->crypt_queue, &io->work);
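
The last hunk above is the heart of the dm-crypt change: the old code
scheduled a tasklet whenever it was called in hard-IRQ or irqs-disabled
context and ran inline otherwise; the new code inverts the test, runs inline
only in the safe case, and lets every atomic-context caller fall through to
the workqueue. A small userspace sketch of that control flow, with
run_inline() and queue_to_wq() as hypothetical stand-ins for kcryptd_crypt()
and queue_work():

    #include <stdbool.h>
    #include <stdio.h>

    static void run_inline(void)  { puts("encrypt inline"); }
    static void queue_to_wq(void) { puts("defer to workqueue"); }

    /* New shape of kcryptd_queue_crypt()'s no-offload path: no tasklet branch. */
    static void queue_crypt(bool in_hardirq, bool irqs_disabled)
    {
        if (!(in_hardirq || irqs_disabled)) {
            run_inline();   /* process context with irqs on: safe to run now */
            return;
        }
        queue_to_wq();      /* atomic context: always a workqueue, never a tasklet */
    }

    int main(void)
    {
        queue_crypt(false, false);  /* -> encrypt inline */
        queue_crypt(true, false);   /* -> defer to workqueue */
        queue_crypt(false, true);   /* -> defer to workqueue */
        return 0;
    }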

drivers/md/dm-ioctl.c
@@ -1941,7 +1941,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
 			   minimum_data_size - sizeof(param_kernel->version)))
 		return -EFAULT;
 
-	if (param_kernel->data_size < minimum_data_size) {
+	if (unlikely(param_kernel->data_size < minimum_data_size) ||
+	    unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
 		DMERR("Invalid data size in the ioctl structure: %u",
 		      param_kernel->data_size);
 		return -EINVAL;
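
The new ceiling in copy_params() is easy to sanity-check:
DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS = 1048576 * 1024 = 2^30 bytes (1 GiB),
comfortably below the INT_MAX limit that kvmalloc enforces. A throwaway
userspace check of that arithmetic:

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    #define DM_MAX_TARGETS        1048576   /* 2^20, from dm-core.h above */
    #define DM_MAX_TARGET_PARAMS  1024      /* 2^10 */

    int main(void)
    {
        unsigned long long cap =
            (unsigned long long)DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS;

        assert(cap == 1ULL << 30);  /* exactly 1 GiB */
        assert(cap < INT_MAX);      /* no kvmalloc INT_MAX warning possible */
        printf("max ioctl data_size: %llu bytes\n", cap);
        return 0;
    }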

drivers/md/dm-stats.c
@@ -66,6 +66,9 @@ struct dm_stats_last_position {
 	unsigned int last_rw;
 };
 
+#define DM_STAT_MAX_ENTRIES		8388608
+#define DM_STAT_MAX_HISTOGRAM_ENTRIES	134217728
+
 /*
  * A typo on the command line could possibly make the kernel run out of memory
  * and crash. To prevent the crash we account all used memory. We fail if we
@@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
 		return -EOVERFLOW;
 
+	if (n_entries > DM_STAT_MAX_ENTRIES)
+		return -EOVERFLOW;
+
 	shared_alloc_size = struct_size(s, stat_shared, n_entries);
 	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
 		return -EOVERFLOW;
@@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
 		return -EOVERFLOW;
 
+	if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES)
+		return -EOVERFLOW;
+
 	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
 				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
 		return -ENOMEM;
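
The dm-stats caps follow the same pattern: 8388608 is 2^23 plain entries, and
134217728 is 2^27 histogram slots, each an 8-byte unsigned long long, so the
largest histogram buffer works out to 2^27 * 8 = 2^30 bytes (1 GiB), again
under INT_MAX. A quick userspace check:

    #include <assert.h>
    #include <stdio.h>

    #define DM_STAT_MAX_ENTRIES            8388608     /* 2^23 */
    #define DM_STAT_MAX_HISTOGRAM_ENTRIES  134217728   /* 2^27 */

    int main(void)
    {
        /* each histogram slot is an unsigned long long, i.e. 8 bytes */
        unsigned long long worst =
            (unsigned long long)DM_STAT_MAX_HISTOGRAM_ENTRIES *
            sizeof(unsigned long long);

        assert(DM_STAT_MAX_ENTRIES == 1ULL << 23);
        assert(DM_STAT_MAX_HISTOGRAM_ENTRIES == 1ULL << 27);
        assert(worst == 1ULL << 30);    /* 1 GiB worst case */
        printf("worst-case histogram buffer: %llu bytes\n", worst);
        return 0;
    }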

drivers/md/dm-table.c
@@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 int dm_table_create(struct dm_table **result, blk_mode_t mode,
 		    unsigned int num_targets, struct mapped_device *md)
 {
-	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+	struct dm_table *t;
+
+	if (num_targets > DM_MAX_TARGETS)
+		return -EOVERFLOW;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
 
 	if (!t)
 		return -ENOMEM;
@@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
 	if (!num_targets) {
 		kfree(t);
-		return -ENOMEM;
+		return -EOVERFLOW;
 	}
 
 	if (alloc_targets(t, num_targets)) {

drivers/md/dm-verity-target.c
@@ -645,23 +645,6 @@ static void verity_work(struct work_struct *w)
 	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
-static void verity_tasklet(unsigned long data)
-{
-	struct dm_verity_io *io = (struct dm_verity_io *)data;
-	int err;
-
-	io->in_tasklet = true;
-	err = verity_verify_io(io);
-	if (err == -EAGAIN || err == -ENOMEM) {
-		/* fallback to retrying with work-queue */
-		INIT_WORK(&io->work, verity_work);
-		queue_work(io->v->verify_wq, &io->work);
-		return;
-	}
-
-	verity_finish_io(io, errno_to_blk_status(err));
-}
-
 static void verity_end_io(struct bio *bio)
 {
 	struct dm_verity_io *io = bio->bi_private;
@@ -674,13 +657,8 @@ static void verity_end_io(struct bio *bio)
 		return;
 	}
 
-	if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
-		tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
-		tasklet_schedule(&io->tasklet);
-	} else {
-		INIT_WORK(&io->work, verity_work);
-		queue_work(io->v->verify_wq, &io->work);
-	}
+	INIT_WORK(&io->work, verity_work);
+	queue_work(io->v->verify_wq, &io->work);
 }
 
 /*

drivers/md/dm-verity.h
@@ -83,7 +83,6 @@ struct dm_verity_io {
 	struct bvec_iter iter;
 
 	struct work_struct work;
-	struct tasklet_struct tasklet;
 
 	/*
	 * Three variably-size fields follow this struct:

drivers/md/dm-writecache.c
@@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 		long i;
 
 		wc->memory_map = NULL;
-		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
+		pages = vmalloc_array(p, sizeof(struct page *));
 		if (!pages) {
 			r = -ENOMEM;
 			goto err2;
@@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 			r = -ENOMEM;
 			goto err3;
 		}
-		kvfree(pages);
+		vfree(pages);
 		wc->memory_vmapped = true;
 	}
@@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	return 0;
 
 err3:
-	kvfree(pages);
+	vfree(pages);
 err2:
 	dax_read_unlock(id);
 err1:
@@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 	if (wc->entries)
 		return 0;
-	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
+	wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry));
 	if (!wc->entries)
 		return -ENOMEM;
 	for (b = 0; b < wc->n_blocks; b++) {
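
Every writecache hunk trades kvmalloc_array/kvfree for vmalloc_array/vfree
because only the kvmalloc side enforces the INT_MAX cap: the page-pointer
array for a 1 TiB persistent-memory device alone is 2^28 pointers * 8 bytes
= 2 GiB. The sketch below contrasts the two size checks in userspace; both
helper names and the 40-byte entry size are illustrative, not the kernel
implementations.

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* kvmalloc_array-style check: multiplication overflow AND the INT_MAX cap */
    static int kvmalloc_like_ok(size_t n, size_t size)
    {
        return size != 0 && n <= SIZE_MAX / size && n * size <= (size_t)INT_MAX;
    }

    /* vmalloc_array-style check: multiplication overflow only */
    static int vmalloc_array_like_ok(size_t n, size_t size)
    {
        return size != 0 && n <= SIZE_MAX / size;
    }

    int main(void)
    {
        size_t n_blocks = (size_t)1 << 28;  /* 1 TiB of 4 KiB blocks */
        size_t entry_sz = 40;               /* hypothetical per-entry size */

        /* ~10 GiB request: the kvmalloc-style cap refuses it, vmalloc allows it */
        printf("kvmalloc_array-style: %s\n",
               kvmalloc_like_ok(n_blocks, entry_sz) ? "ok" : "refused");
        printf("vmalloc_array-style:  %s\n",
               vmalloc_array_like_ok(n_blocks, entry_sz) ? "ok" : "refused");
        return 0;
    }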