Commit 5ee4c5a9 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following problems:

   - regression in new XTS/LRW code when used with async crypto

   - long-standing bug in ahash API when used with certain algos

   - bogus memory dereference in async algif_aead with certain algos"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: algif_aead - Fix bogus request dereference in completion function
  crypto: ahash - Fix EINPROGRESS notification callback
  crypto: lrw - Fix use-after-free on EINPROGRESS
  crypto: xts - Fix use-after-free on EINPROGRESS
parents 20bb78f6 e6534aeb
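
All four fixes revolve around the same completion-callback contract: a request submitted with CRYPTO_TFM_REQ_MAY_BACKLOG may return -EBUSY, after which the caller's callback is first invoked with err == -EINPROGRESS (the request has left the backlog and is now running) and only later with the final status. Below is a minimal caller-side sketch of that contract; the example_* names are illustrative only (real users such as testmgr use an equivalent wrapper), and this is not code from the patches themselves.

	#include <crypto/hash.h>
	#include <linux/completion.h>
	#include <linux/scatterlist.h>

	struct example_wait {			/* hypothetical caller-side state */
		struct completion completion;
		int err;
	};

	static void example_done(struct crypto_async_request *req, int err)
	{
		struct example_wait *wait = req->data;

		if (err == -EINPROGRESS)
			return;		/* backlog notification only; keep waiting */

		wait->err = err;
		complete(&wait->completion);
	}

	static int example_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
				  unsigned int len, u8 *out)
	{
		struct ahash_request *req;
		struct example_wait wait;
		int err;

		init_completion(&wait.completion);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   example_done, &wait);
		ahash_request_set_crypt(req, sg, out, len);

		err = crypto_ahash_digest(req);
		if (err == -EINPROGRESS || err == -EBUSY) {
			wait_for_completion(&wait.completion);
			err = wait.err;
		}

		ahash_request_free(req);
		return err;
	}

The bugs fixed below are cases where templates and algif_aead mishandled the -EINPROGRESS leg of this contract on their internal sub-requests, freeing or reusing state that was still in flight.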
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -32,6 +32,7 @@ struct ahash_request_priv {
 	crypto_completion_t complete;
 	void *data;
 	u8 *result;
+	u32 flags;
 	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 	priv->result = req->result;
 	priv->complete = req->base.complete;
 	priv->data = req->base.data;
+	priv->flags = req->base.flags;
+
 	/*
 	 * WARNING: We do not backup req->priv here! The req->priv
 	 * is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 	return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
 	struct ahash_request_priv *priv = req->priv;
 
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
 	/* Restore the original crypto request. */
 	req->result = priv->result;
-	req->base.complete = priv->complete;
-	req->base.data = priv->data;
+
+	ahash_request_set_callback(req, priv->flags,
+				   priv->complete, priv->data);
 	req->priv = NULL;
 
 	/* Free the req->priv.priv from the ADJUSTED request. */
 	kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
 	struct ahash_request_priv *priv = req->priv;
+	struct crypto_async_request oreq;
 
-	if (err == -EINPROGRESS)
-		return;
-
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+	oreq.data = priv->data;
 
-	ahash_restore_req(req);
+	priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
 	/*
 	 * Restore the original request, see ahash_op_unaligned() for what
 	 * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 	 */
 
 	/* First copy req->result into req->priv.result */
-	ahash_op_unaligned_finish(areq, err);
+	ahash_restore_req(areq, err);
 
 	/* Complete the ORIGINAL request. */
 	areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
 		return err;
 
 	err = op(req);
-	ahash_op_unaligned_finish(req, err);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
+	ahash_restore_req(req, err);
 
 	return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-	struct ahash_request_priv *priv = req->priv;
+	struct ahash_request *areq = req->data;
 
 	if (err == -EINPROGRESS)
 		return;
 
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-	ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-	struct ahash_request *areq = req->data;
-
-	ahash_def_finup_finish2(areq, err);
+	ahash_restore_req(areq, err);
 
 	areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 		goto out;
 
 	req->base.complete = ahash_def_finup_done2;
-	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = crypto_ahash_reqtfm(req)->final(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
 
 out:
-	ahash_def_finup_finish2(req, err);
+	ahash_restore_req(req, err);
 	return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = ahash_def_finup_finish1(areq, err);
+	if (areq->priv)
+		return;
 
 	areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
 		return err;
 
 	err = tfm->update(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
 	return ahash_def_finup_finish1(req, err);
 }
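
The recurring submit-side idiom in the ahash.c hunks above: the saved request state may only be restored (and freed) when the inner operation completed synchronously; if it returned -EINPROGRESS, or -EBUSY with backlogging allowed, cleanup is deferred to the completion callback, which now also forwards the -EINPROGRESS notification via ahash_notify_einprogress(). A condensed sketch of that rule, with the helper name example_submit being illustrative (the patch applies the check inline in ahash_op_unaligned() and ahash_def_finup()):

	static int example_submit(struct ahash_request *req,
				  int (*op)(struct ahash_request *))
	{
		int err = op(req);

		/* Still in flight (or queued on the backlog): the completion
		 * callback owns the saved state and will restore it later. */
		if (err == -EINPROGRESS ||
		    (err == -EBUSY && (ahash_request_flags(req) &
				       CRYPTO_TFM_REQ_MAY_BACKLOG)))
			return err;

		/* Completed synchronously: copy the result back and free the
		 * private copy now. */
		ahash_restore_req(req, err);
		return err;
	}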
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -40,6 +40,7 @@ struct aead_async_req {
 	struct aead_async_rsgl first_rsgl;
 	struct list_head list;
 	struct kiocb *iocb;
+	struct sock *sk;
 	unsigned int tsgls;
 	char iv[];
 };
@@ -379,12 +380,10 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-	struct sock *sk = _req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct aead_ctx *ctx = ask->private;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-	struct aead_request *req = aead_request_cast(_req);
+	struct aead_request *req = _req->data;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct sock *sk = areq->sk;
 	struct scatterlist *sg = areq->tsgl;
 	struct aead_async_rsgl *rsgl;
 	struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
 	INIT_LIST_HEAD(&areq->list);
 	areq->iocb = msg->msg_iocb;
+	areq->sk = sk;
 	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
 	aead_request_set_tfm(req, tfm);
 	aead_request_set_ad(req, ctx->aead_assoclen);
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  aead_async_cb, sk);
+				  aead_async_cb, req);
 	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
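
In both the lrw and xts handlers, an -EINPROGRESS notification from a sub-request must not be treated as completion of the walk: the added branch forwards it to the original request only while nothing has been processed yet (rctx->left == req->cryptlen), so the caller sees at most one backlog notification, and it never touches subreq or frees per-request state. The notification travels through skcipher_request_complete(), which (reproduced here for reference from include/crypto/internal/skcipher.h of that era; essentially a one-line forwarder) simply hands the error to the original request's callback:

	static inline void skcipher_request_complete(struct skcipher_request *req, int err)
	{
		req->base.complete(&req->base, err);
	}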
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
 	return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+	return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
 	struct crypto_ahash_spawn *spawn)
 {
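
The two new inline helpers give hash templates a sanctioned way to read the caller's request flags (used by the backlog checks in the ahash.c hunks above) and to complete the caller's request. A hypothetical template completion handler using them might look like the sketch below; example_hash_done and the surrounding assumptions are illustrative, not part of the patch.

	static void example_hash_done(struct crypto_async_request *base, int err)
	{
		struct ahash_request *req = base->data;

		if (err == -EINPROGRESS) {
			/* Forward the backlog notification; do not clean up yet. */
			ahash_request_complete(req, -EINPROGRESS);
			return;
		}

		/* ... template-specific post-processing would go here ... */

		ahash_request_complete(req, err);
	}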