Commit 1d63e455 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: amlogic-gxl - permit async skcipher as fallback

Even though the amlogic-gxl driver implements asynchronous versions of
ecb(aes) and cbc(aes), the fallbacks it allocates are required to be
synchronous. Given that SIMD based software implementations are usually
asynchronous as well, even though they rarely complete asynchronously
(this typically only happens in cases where the request was made from
softirq context, while SIMD was already in use in the task context that
it interrupted), these implementations are disregarded, and either the
generic C version or another table based version implemented in assembler
is selected instead.

Since falling back to synchronous AES is not only a performance issue,
but potentially a security issue as well (due to the fact that table
based AES is not time invariant), let's fix this, by allocating an
ordinary skcipher as the fallback, and invoke it with the completion
routine that was given to the outer request.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 3f368b88
...@@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq) ...@@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt; struct meson_alg_template *algt;
#endif
SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
algt = container_of(alg, struct meson_alg_template, alg.skcipher); algt = container_of(alg, struct meson_alg_template, alg.skcipher);
algt->stat_fb++; algt->stat_fb++;
#endif #endif
skcipher_request_set_sync_tfm(req, op->fallback_tfm); skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(req, areq->base.flags, NULL, NULL); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
skcipher_request_set_crypt(req, areq->src, areq->dst, areq->base.complete, areq->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv); areq->cryptlen, areq->iv);
if (rctx->op_dir == MESON_DECRYPT) if (rctx->op_dir == MESON_DECRYPT)
err = crypto_skcipher_decrypt(req); err = crypto_skcipher_decrypt(&rctx->fallback_req);
else else
err = crypto_skcipher_encrypt(req); err = crypto_skcipher_encrypt(&rctx->fallback_req);
skcipher_request_zero(req);
return err; return err;
} }
...@@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm) ...@@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct meson_alg_template, alg.skcipher); algt = container_of(alg, struct meson_alg_template, alg.skcipher);
op->mc = algt->mc; op->mc = algt->mc;
sktfm->reqsize = sizeof(struct meson_cipher_req_ctx); op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) { if (IS_ERR(op->fallback_tfm)) {
dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n", dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm)); name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm); return PTR_ERR(op->fallback_tfm);
} }
sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
op->enginectx.op.do_one_request = meson_handle_cipher_request; op->enginectx.op.do_one_request = meson_handle_cipher_request;
op->enginectx.op.prepare_request = NULL; op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL; op->enginectx.op.unprepare_request = NULL;
...@@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm) ...@@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen); memzero_explicit(op->key, op->keylen);
kfree(op->key); kfree(op->key);
} }
crypto_free_sync_skcipher(op->fallback_tfm); crypto_free_skcipher(op->fallback_tfm);
} }
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
...@@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, ...@@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key) if (!op->key)
return -ENOMEM; return -ENOMEM;
return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
} }
...@@ -109,6 +109,7 @@ struct meson_dev { ...@@ -109,6 +109,7 @@ struct meson_dev {
/*
 * Per-request context for the amlogic-gxl cipher driver.
 * Allocated in the skcipher request's private area (reqsize is set to
 * sizeof(struct meson_cipher_req_ctx) + crypto_skcipher_reqsize() of the
 * fallback tfm elsewhere in this driver).
 */
struct meson_cipher_req_ctx {
	u32 op_dir;	/* direction of the operation: MESON_ENCRYPT or MESON_DECRYPT */
	int flow;	/* hardware flow/channel index used for this request */
	/*
	 * Sub-request handed to the async skcipher fallback. Must stay the
	 * last member: the fallback's own request context is carved out of
	 * the extra reqsize bytes that immediately follow this struct.
	 */
	struct skcipher_request fallback_req; // keep at the end
};
/* /*
...@@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx { ...@@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx {
u32 keylen; u32 keylen;
u32 keymode; u32 keymode;
struct meson_dev *mc; struct meson_dev *mc;
struct crypto_sync_skcipher *fallback_tfm; struct crypto_skcipher *fallback_tfm;
}; };
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment