Commit 5d2a5172 authored by Eric Biggers, committed by Greg Kroah-Hartman

crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP

commit ba6771c0 upstream.

The x86 AEGIS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts.  The issue is that
they assume that if the skcipher_walk API gives 'nbytes' not aligned to
the walksize (a.k.a. walk.stride), then it is the end of the data.  In
fact, this can happen before the end.
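Illustrative sketch of the corrected walk loop (not self-contained; the identifiers and layout follow the aegis128 hunk in the diff below): only whole blocks are consumed in each walk step, and the unprocessed remainder is handed back to skcipher_walk_done() so the walk continues with the next chunk instead of a short chunk being mistaken for the end of the data.

	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
		/* encrypt/decrypt only the whole blocks in this chunk */
		ops->crypt_blocks(state,
				  round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
				  walk->src.virt.addr, walk->dst.virt.addr);
		/* return the leftover bytes to the walk; it may map more data */
		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
	}

	if (walk->nbytes) {
		/* a partial block can now only occur at the true end */
		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
				walk->dst.virt.addr);
		skcipher_walk_done(walk, 0);
	}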

Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
incorrectly sleep in the skcipher_walk_*() functions while preemption
has been disabled by kernel_fpu_begin().
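Illustrative sketch of the corresponding reordering (again mirroring the aegis128 hunks below): the walk is now initialized by the caller before kernel_fpu_begin(), and the atomic flag is passed as true, so the walk steps taken while preemption is disabled never try to sleep.

	struct skcipher_walk walk;

	/* set up the walk while sleeping is still allowed;
	 * atomic == true keeps all later walk steps non-sleeping */
	ops->skcipher_walk_init(&walk, req, true);

	kernel_fpu_begin();
	/* ... init, AD processing ... */
	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
	/* ... final ... */
	kernel_fpu_end();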

Fix these bugs.

Fixes: 1d373d4e ("crypto: x86 - Add optimized AEGIS implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 574c19d9
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
 }
 
 static void crypto_aegis128_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
 }
 
 static void crypto_aegis128l_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+		ops->crypt_blocks(state, round_down(walk->nbytes,
+						    AEGIS128L_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
 }
 
 static void crypto_aegis256_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
 	crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis256_aesni_process_crypt(&state, req, ops);
+	crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();