Commit 298a9fa0 authored by Mikulas Patocka, committed by Mike Snitzer

dm crypt: use per-bio data

Change dm-crypt so that it uses auxiliary data allocated with the bio.

Dm-crypt requires two allocations per request - struct dm_crypt_io and
struct ablkcipher_request (with other data appended to it).  It
previously only used mempool allocations.

Some requests may require more dm_crypt_ios and ablkcipher_requests,
however most requests need just one of each of these two structures to
complete.

This patch changes it so that the first dm_crypt_io and ablkcipher_request
are allocated with the bio (using target per_bio_data_size option).  If
the request needs additional values, they are allocated from the mempool.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 6a241483
...@@ -59,7 +59,7 @@ struct dm_crypt_io { ...@@ -59,7 +59,7 @@ struct dm_crypt_io {
int error; int error;
sector_t sector; sector_t sector;
struct dm_crypt_io *base_io; struct dm_crypt_io *base_io;
}; } CRYPTO_MINALIGN_ATTR;
struct dm_crypt_request { struct dm_crypt_request {
struct convert_context *ctx; struct convert_context *ctx;
...@@ -162,6 +162,8 @@ struct crypt_config { ...@@ -162,6 +162,8 @@ struct crypt_config {
*/ */
unsigned int dmreq_start; unsigned int dmreq_start;
unsigned int per_bio_data_size;
unsigned long flags; unsigned long flags;
unsigned int key_size; unsigned int key_size;
unsigned int key_parts; /* independent parts in key buffer */ unsigned int key_parts; /* independent parts in key buffer */
...@@ -895,6 +897,15 @@ static void crypt_alloc_req(struct crypt_config *cc, ...@@ -895,6 +897,15 @@ static void crypt_alloc_req(struct crypt_config *cc,
kcryptd_async_done, dmreq_of_req(cc, ctx->req)); kcryptd_async_done, dmreq_of_req(cc, ctx->req));
} }
/*
 * Free an ablkcipher_request previously obtained via crypt_alloc_req().
 *
 * The first request for a bio is carved out of the bio's per-bio data,
 * located immediately after the embedded struct dm_crypt_io (the map
 * path sets io->ctx.req = (struct ablkcipher_request *)(io + 1)); such
 * an inline request must not be returned to the mempool.  Only requests
 * that were actually allocated from cc->req_pool are freed here.
 */
static void crypt_free_req(struct crypt_config *cc,
struct ablkcipher_request *req, struct bio *base_bio)
{
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
/* Inline request (allocated together with the bio) — nothing to free. */
if ((struct ablkcipher_request *)(io + 1) != req)
mempool_free(req, cc->req_pool);
}
/* /*
* Encrypt / decrypt data from one bio to another one (can be the same one) * Encrypt / decrypt data from one bio to another one (can be the same one)
*/ */
...@@ -1008,12 +1019,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) ...@@ -1008,12 +1019,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
} }
} }
static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
struct bio *bio, sector_t sector) struct bio *bio, sector_t sector)
{ {
struct dm_crypt_io *io;
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->cc = cc; io->cc = cc;
io->base_bio = bio; io->base_bio = bio;
io->sector = sector; io->sector = sector;
...@@ -1021,8 +1029,6 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, ...@@ -1021,8 +1029,6 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
io->base_io = NULL; io->base_io = NULL;
io->ctx.req = NULL; io->ctx.req = NULL;
atomic_set(&io->io_pending, 0); atomic_set(&io->io_pending, 0);
return io;
} }
static void crypt_inc_pending(struct dm_crypt_io *io) static void crypt_inc_pending(struct dm_crypt_io *io)
...@@ -1046,7 +1052,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io) ...@@ -1046,7 +1052,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
return; return;
if (io->ctx.req) if (io->ctx.req)
mempool_free(io->ctx.req, cc->req_pool); crypt_free_req(cc, io->ctx.req, base_bio);
if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
mempool_free(io, cc->io_pool); mempool_free(io, cc->io_pool);
if (likely(!base_io)) if (likely(!base_io))
...@@ -1255,8 +1262,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) ...@@ -1255,8 +1262,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
* between fragments, so switch to a new dm_crypt_io structure. * between fragments, so switch to a new dm_crypt_io structure.
*/ */
if (unlikely(!crypt_finished && remaining)) { if (unlikely(!crypt_finished && remaining)) {
new_io = crypt_io_alloc(io->cc, io->base_bio, new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
sector); crypt_io_init(new_io, io->cc, io->base_bio, sector);
crypt_inc_pending(new_io); crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL, crypt_convert_init(cc, &new_io->ctx, NULL,
io->base_bio, sector); io->base_bio, sector);
...@@ -1325,7 +1332,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, ...@@ -1325,7 +1332,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
if (error < 0) if (error < 0)
io->error = -EIO; io->error = -EIO;
mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
if (!atomic_dec_and_test(&ctx->cc_pending)) if (!atomic_dec_and_test(&ctx->cc_pending))
return; return;
...@@ -1728,6 +1735,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -1728,6 +1735,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad; goto bad;
} }
cc->per_bio_data_size = ti->per_bio_data_size =
sizeof(struct dm_crypt_io) + cc->dmreq_start +
sizeof(struct dm_crypt_request) + cc->iv_size;
cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!cc->page_pool) { if (!cc->page_pool) {
ti->error = "Cannot allocate page mempool"; ti->error = "Cannot allocate page mempool";
...@@ -1824,7 +1835,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) ...@@ -1824,7 +1835,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
io->ctx.req = (struct ablkcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) { if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT)) if (kcryptd_io_read(io, GFP_NOWAIT))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.