Commit 0c1e16cd authored by Stephan Mueller's avatar Stephan Mueller Committed by Herbert Xu

crypto: algif_aead - fix AEAD tag memory handling

For encryption, the AEAD ciphers require AAD || PT as input and generate
AAD || CT || Tag as output and vice versa for decryption. Prior to this
patch, the AF_ALG interface for AEAD ciphers requires the buffer to be
present as input for encryption. Similarly, the output buffer for
decryption required the presence of the tag buffer too. This implies
that the kernel reads / writes data buffers from/to kernel space
even though this operation is not required.

This patch changes the AF_ALG AEAD interface to be consistent with the
in-kernel AEAD cipher requirements.

Due to this handling, the changes are transparent to user space with one
exception: the return code of recv indicates the amount of output data.
That output buffer has a different size compared to before the patch
which implies that the return code of recv will also be different.
For example, if a decryption operation uses 16 bytes AAD, 16 bytes CT and
16 bytes tag, the AF_ALG AEAD interface before this patch showed a recv
return code of 48 (bytes), whereas after this patch the return code is 32
since the tag is not returned any more.
Reported-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 39eaf759
...@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx) ...@@ -81,7 +81,11 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{ {
unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
return ctx->used >= ctx->aead_assoclen + as; /*
* The minimum amount of memory needed for an AEAD cipher is
* the AAD and in case of decryption the tag.
*/
return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
} }
static void aead_reset_ctx(struct aead_ctx *ctx) static void aead_reset_ctx(struct aead_ctx *ctx)
...@@ -426,12 +430,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, ...@@ -426,12 +430,15 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
goto unlock; goto unlock;
} }
used = ctx->used;
outlen = used;
if (!aead_sufficient_data(ctx)) if (!aead_sufficient_data(ctx))
goto unlock; goto unlock;
used = ctx->used;
if (ctx->enc)
outlen = used + as;
else
outlen = used - as;
req = sock_kmalloc(sk, reqlen, GFP_KERNEL); req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
if (unlikely(!req)) if (unlikely(!req))
goto unlock; goto unlock;
...@@ -445,7 +452,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, ...@@ -445,7 +452,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
aead_request_set_ad(req, ctx->aead_assoclen); aead_request_set_ad(req, ctx->aead_assoclen);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
aead_async_cb, sk); aead_async_cb, sk);
used -= ctx->aead_assoclen + (ctx->enc ? as : 0); used -= ctx->aead_assoclen;
/* take over all tx sgls from ctx */ /* take over all tx sgls from ctx */
areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur, areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
...@@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, ...@@ -461,7 +468,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
areq->tsgls = sgl->cur; areq->tsgls = sgl->cur;
/* create rx sgls */ /* create rx sgls */
while (iov_iter_count(&msg->msg_iter)) { while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages)); (outlen - usedpages));
...@@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, ...@@ -491,16 +498,14 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
last_rsgl = rsgl; last_rsgl = rsgl;
/* we do not need more iovecs as we have sufficient memory */
if (outlen <= usedpages)
break;
iov_iter_advance(&msg->msg_iter, err); iov_iter_advance(&msg->msg_iter, err);
} }
err = -EINVAL;
/* ensure output buffer is sufficiently large */ /* ensure output buffer is sufficiently large */
if (usedpages < outlen) if (usedpages < outlen) {
goto free; err = -EINVAL;
goto unlock;
}
aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used, aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
areq->iv); areq->iv);
...@@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) ...@@ -571,6 +576,7 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
goto unlock; goto unlock;
} }
/* data length provided by caller via sendmsg/sendpage */
used = ctx->used; used = ctx->used;
/* /*
...@@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) ...@@ -585,16 +591,27 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
if (!aead_sufficient_data(ctx)) if (!aead_sufficient_data(ctx))
goto unlock; goto unlock;
outlen = used; /*
* Calculate the minimum output buffer size holding the result of the
* cipher operation. When encrypting data, the receiving buffer is
* larger by the tag length compared to the input buffer as the
* encryption operation generates the tag. For decryption, the input
* buffer provides the tag which is consumed resulting in only the
* plaintext without a buffer for the tag returned to the caller.
*/
if (ctx->enc)
outlen = used + as;
else
outlen = used - as;
/* /*
* The cipher operation input data is reduced by the associated data * The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on. * length as this data is processed separately later on.
*/ */
used -= ctx->aead_assoclen + (ctx->enc ? as : 0); used -= ctx->aead_assoclen;
/* convert iovecs of output buffers into scatterlists */ /* convert iovecs of output buffers into scatterlists */
while (iov_iter_count(&msg->msg_iter)) { while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
(outlen - usedpages)); (outlen - usedpages));
...@@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) ...@@ -621,16 +638,14 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
last_rsgl = rsgl; last_rsgl = rsgl;
/* we do not need more iovecs as we have sufficient memory */
if (outlen <= usedpages)
break;
iov_iter_advance(&msg->msg_iter, err); iov_iter_advance(&msg->msg_iter, err);
} }
err = -EINVAL;
/* ensure output buffer is sufficiently large */ /* ensure output buffer is sufficiently large */
if (usedpages < outlen) if (usedpages < outlen) {
err = -EINVAL;
goto unlock; goto unlock;
}
sg_mark_end(sgl->sg + sgl->cur - 1); sg_mark_end(sgl->sg + sgl->cur - 1);
aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg, aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment