Commit 83c83e65 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: aesni - refactor scatterlist processing

Currently, the gcm(aes-ni) driver open codes the scatterlist handling
that is encapsulated by the skcipher walk API. So let's switch to that
instead.

Also, move the handling at the end of gcmaes_crypt_by_sg() that is
dependent on whether we are encrypting or decrypting into the callers,
which always do one or the other.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 2694e23f
@@ -638,25 +638,18 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
unsigned int assoclen, u8 *hash_subkey, unsigned int assoclen, u8 *hash_subkey,
u8 *iv, void *aes_ctx) u8 *iv, void *aes_ctx, u8 *auth_tag,
unsigned long auth_tag_len)
{ {
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8); u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN); struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
struct scatter_walk dst_sg_walk = {};
unsigned long left = req->cryptlen; unsigned long left = req->cryptlen;
unsigned long len, srclen, dstlen;
struct scatter_walk assoc_sg_walk; struct scatter_walk assoc_sg_walk;
struct scatter_walk src_sg_walk; struct skcipher_walk walk;
struct scatterlist src_start[2];
struct scatterlist dst_start[2];
struct scatterlist *src_sg;
struct scatterlist *dst_sg;
u8 *src, *dst, *assoc;
u8 *assocmem = NULL; u8 *assocmem = NULL;
u8 authTag[16]; u8 *assoc;
int err;
if (!enc) if (!enc)
left -= auth_tag_len; left -= auth_tag_len;
@@ -683,61 +676,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
} }
if (left) {
src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
scatterwalk_start(&src_sg_walk, src_sg);
if (req->src != req->dst) {
dst_sg = scatterwalk_ffwd(dst_start, req->dst,
req->assoclen);
scatterwalk_start(&dst_sg_walk, dst_sg);
}
}
kernel_fpu_begin(); kernel_fpu_begin();
gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen); gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
if (req->src != req->dst) {
while (left) {
src = scatterwalk_map(&src_sg_walk);
dst = scatterwalk_map(&dst_sg_walk);
srclen = scatterwalk_clamp(&src_sg_walk, left);
dstlen = scatterwalk_clamp(&dst_sg_walk, left);
len = min(srclen, dstlen);
if (len) {
if (enc)
gcm_tfm->enc_update(aes_ctx, data,
dst, src, len);
else
gcm_tfm->dec_update(aes_ctx, data,
dst, src, len);
}
left -= len;
scatterwalk_unmap(src);
scatterwalk_unmap(dst);
scatterwalk_advance(&src_sg_walk, len);
scatterwalk_advance(&dst_sg_walk, len);
scatterwalk_done(&src_sg_walk, 0, left);
scatterwalk_done(&dst_sg_walk, 1, left);
}
} else {
while (left) {
dst = src = scatterwalk_map(&src_sg_walk);
len = scatterwalk_clamp(&src_sg_walk, left);
if (len) {
if (enc)
gcm_tfm->enc_update(aes_ctx, data,
src, src, len);
else
gcm_tfm->dec_update(aes_ctx, data,
src, src, len);
}
left -= len;
scatterwalk_unmap(src);
scatterwalk_advance(&src_sg_walk, len);
scatterwalk_done(&src_sg_walk, 1, left);
}
}
gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
kernel_fpu_end(); kernel_fpu_end();
if (!assocmem) if (!assocmem)
@@ -745,24 +685,25 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
else else
kfree(assocmem); kfree(assocmem);
if (!enc) { err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
u8 authTagMsg[16]; : skcipher_walk_aead_decrypt(&walk, req, false);
/* Copy out original authTag */ while (walk.nbytes > 0) {
scatterwalk_map_and_copy(authTagMsg, req->src, kernel_fpu_begin();
req->assoclen + req->cryptlen - (enc ? gcm_tfm->enc_update
auth_tag_len, : gcm_tfm->dec_update)(aes_ctx, data, walk.dst.virt.addr,
auth_tag_len, 0); walk.src.virt.addr, walk.nbytes);
kernel_fpu_end();
/* Compare generated tag with passed in tag. */ err = skcipher_walk_done(&walk, 0);
return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
-EBADMSG : 0;
} }
/* Copy in the authTag */ if (err)
scatterwalk_map_and_copy(authTag, req->dst, return err;
req->assoclen + req->cryptlen,
auth_tag_len, 1); kernel_fpu_begin();
gcm_tfm->finalize(aes_ctx, data, auth_tag, auth_tag_len);
kernel_fpu_end();
return 0; return 0;
} }
@@ -770,15 +711,47 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen, static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx) u8 *hash_subkey, u8 *iv, void *aes_ctx)
{ {
return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, struct crypto_aead *tfm = crypto_aead_reqtfm(req);
aes_ctx); unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 auth_tag[16];
int err;
err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
auth_tag, auth_tag_len);
if (err)
return err;
scatterwalk_map_and_copy(auth_tag, req->dst,
req->assoclen + req->cryptlen,
auth_tag_len, 1);
return 0;
} }
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx) u8 *hash_subkey, u8 *iv, void *aes_ctx)
{ {
return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, struct crypto_aead *tfm = crypto_aead_reqtfm(req);
aes_ctx); unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 auth_tag_msg[16];
u8 auth_tag[16];
int err;
err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
auth_tag, auth_tag_len);
if (err)
return err;
/* Copy out original auth_tag */
scatterwalk_map_and_copy(auth_tag_msg, req->src,
req->assoclen + req->cryptlen - auth_tag_len,
auth_tag_len, 0);
/* Compare generated tag with passed in tag. */
if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
memzero_explicit(auth_tag, sizeof(auth_tag));
return -EBADMSG;
}
return 0;
} }
static int helper_rfc4106_encrypt(struct aead_request *req) static int helper_rfc4106_encrypt(struct aead_request *req)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment