Commit cf2f1abf authored by Mikulas Patocka, committed by Mike Snitzer

dm crypt: don't allocate pages for a partial request

Change crypt_alloc_buffer so that it only ever allocates pages for a
full request.  This is a prerequisite for the commit "dm crypt: offload
writes to thread".

This change simplifies the dm-crypt code at the expense of reduced
throughput in low memory conditions (where allocation for a partial
request is most useful).

Note: the next commit ("dm crypt: avoid deadlock in mempools") is needed
to fix a theoretical deadlock.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent f3396c58
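
For orientation before the hunks, here is a condensed sketch of crypt_alloc_buffer() as it looks after this patch, trimmed from the new lines below. The nr_iovecs computation lives in an unchanged part of the function and is assumed here to be the usual round-up of size to whole pages. The key point is that gfp_mask keeps __GFP_WAIT (via GFP_NOIO), so mempool_alloc() sleeps until a page is available instead of failing, which is why the out_of_pages flag, the partial-bio fallback and the bio_add_page() error path can all be dropped.

	static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
	{
		struct crypt_config *cc = io->cc;
		struct bio *clone;
		/* assumed from the unchanged context: one iovec per page of data */
		unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
		unsigned i, len;
		struct page *page;
		struct bio_vec *bvec;

		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
		if (!clone)
			return NULL;

		clone_init(io, clone);

		for (i = 0; i < nr_iovecs; i++) {
			/* GFP_NOIO includes __GFP_WAIT, so this blocks instead of returning NULL */
			page = mempool_alloc(cc->page_pool, gfp_mask);

			len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

			/* fill the bio_vec directly; no bio_add_page() failure path */
			bvec = &clone->bi_io_vec[clone->bi_vcnt++];
			bvec->bv_page = page;
			bvec->bv_len = len;
			bvec->bv_offset = 0;

			clone->bi_iter.bi_size += len;
			size -= len;
		}

		return clone;
	}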
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -58,7 +58,6 @@ struct dm_crypt_io {
 	atomic_t io_pending;
 	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -172,7 +171,6 @@ struct crypt_config {
 };
 
 #define MIN_IOS 16
-#define MIN_POOL_PAGES 32
 
 static struct kmem_cache *_crypt_io_pool;
@@ -946,14 +944,13 @@ static int crypt_convert(struct crypt_config *cc,
 	return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
@@ -961,41 +958,27 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned i, len;
 	struct page *page;
+	struct bio_vec *bvec;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;
 
 	clone_init(io, clone);
-	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page) {
-			*out_of_pages = 1;
-			break;
-		}
 
-		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio. The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
-
 		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
 
-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+		bvec->bv_page = page;
+		bvec->bv_len = len;
+		bvec->bv_offset = 0;
 
-		size -= len;
-	}
+		clone->bi_iter.bi_size += len;
 
-	if (!clone->bi_iter.bi_size) {
-		bio_put(clone);
-		return NULL;
+		size -= len;
 	}
 
 	return clone;
@@ -1020,7 +1003,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
-	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1015,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
@@ -1050,13 +1030,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
 		mempool_free(io, cc->io_pool);
 
-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
-	}
+	bio_endio(base_bio, error);
 }
 
 /*
@@ -1192,10 +1166,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
 	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;
@@ -1205,80 +1176,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
-		}
-
-		io->ctx.bio_out = clone;
-		io->ctx.iter_out = clone->bi_iter;
-
-		remaining -= clone->bi_iter.bi_size;
-		sector += bio_sectors(clone);
-
-		crypt_inc_pending(io);
-
-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
-
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
-
-			io->sector = sector;
-		}
-
-		/*
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		 */
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
-
-		/*
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		 */
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-			crypt_io_init(new_io, io->cc, io->base_bio, sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.iter_in = io->ctx.iter_in;
-
-			/*
-			 * Fragments after the first use the base_io
-			 * pending count.
-			 */
-			if (!io->base_io)
-				new_io->base_io = io;
-			else {
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
-			}
-
-			io = new_io;
-		}
-	}
-
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+	if (unlikely(!clone)) {
+		io->error = -EIO;
+		goto dec;
+	}
+
+	io->ctx.bio_out = clone;
+	io->ctx.iter_out = clone->bi_iter;
+
+	sector += bio_sectors(clone);
+
+	crypt_inc_pending(io);
+	r = crypt_convert(cc, &io->ctx);
+	if (r)
+		io->error = -EIO;
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
+	}
+
+dec:
 	crypt_dec_pending(io);
 }
@@ -1746,7 +1667,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
 		ARCH_KMALLOC_MINALIGN);
 
-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;
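
A hedged editorial note on the last hunk, since the patch itself states no rationale: with the partial-request path gone, a single request now takes every page of its clone bio from cc->page_pool in one pass, so the pool's reserve is raised from MIN_POOL_PAGES (32 pages) to BIO_MAX_PAGES, i.e. enough reserved pages to back one maximally sized bio on its own. The line below restates the new constructor call; the comment is editorial, not part of the patch.

	/*
	 * Reserve enough pages for one full-sized bio, because
	 * crypt_alloc_buffer() now draws all of a request's pages
	 * from this pool at once.
	 */
	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);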