Commit 6b363a28 authored by Devulapally Shiva Krishna, committed by David S. Miller

Crypto/chcr: fix ctr, cbc, xts and rfc3686-ctr failed tests

This solves the following issues observed during the self-tests when
CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is enabled.

1. Added a fallback for cbc, ctr and rfc3686 when req->nbytes is zero,
and for xts a fallback when req->nbytes is not a multiple of 16; a sketch
of the combined rule follows below.
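
As a stand-alone illustration of that combined dispatch rule (a sketch only: the subtype enum and needs_fallback() below are local stand-ins for the driver's CRYPTO_ALG_SUB_TYPE_* constants and the inline checks added to process_cipher()):

```c
#include <stdbool.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* Local stand-in for the driver's CRYPTO_ALG_SUB_TYPE_* subtypes. */
enum subtype { SUB_CBC, SUB_CTR, SUB_CTR_RFC3686, SUB_XTS };

/* Mirrors the new checks: zero-length requests fall back for
 * cbc/ctr/rfc3686; xts falls back when the length is not a
 * multiple of the AES block size. */
static bool needs_fallback(enum subtype s, unsigned int nbytes)
{
	if (nbytes == 0 && s != SUB_XTS)
		return true;
	if (s == SUB_XTS && (nbytes % AES_BLOCK_SIZE))
		return true;
	return false;
}

int main(void)
{
	printf("cbc,  0 bytes -> fallback=%d\n", needs_fallback(SUB_CBC, 0));  /* 1 */
	printf("xts, 17 bytes -> fallback=%d\n", needs_fallback(SUB_XTS, 17)); /* 1 */
	printf("xts, 32 bytes -> fallback=%d\n", needs_fallback(SUB_XTS, 32)); /* 0 */
	return 0;
}
```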

2. Fixed a wrong IV update in the cbc-aes case: when
chcr_cipher_fallback() is called, the req->info pointer is now used
instead of reqctx->iv.

3. Fixed a wrong result in cbc-aes decryption, seen when
chcr_cipher_fallback() is called from chcr_handle_cipher_resp().
At that point the IV (req->info) has already been updated mid-request,
so the fallback, which redoes the whole request, must use the initial
IV instead; the toy sketch below illustrates the chaining.
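
A toy demonstration of the CBC chaining behind fixes 2 and 3 (a sketch only: byte-wise XOR stands in for AES, and every name here is local). It shows that split processing only matches a whole-request decryption when the second chunk starts from the last ciphertext block of the first chunk, which is why a whole-request software fallback must be handed the initial IV rather than the partially updated one:

```c
#include <stdio.h>
#include <string.h>

#define BLK 16

/* Toy CBC decryption: byte-wise XOR with the key stands in for AES.
 * The CBC structure is the real point: the IV consumed by block b+1
 * is the ciphertext of block b. */
static void toy_cbc_decrypt(const unsigned char *ct, unsigned char *pt,
			    size_t blocks, const unsigned char key[BLK],
			    unsigned char iv[BLK])
{
	for (size_t b = 0; b < blocks; b++) {
		for (int i = 0; i < BLK; i++)
			pt[b * BLK + i] = (ct[b * BLK + i] ^ key[i]) ^ iv[i];
		memcpy(iv, &ct[b * BLK], BLK); /* next IV = this ct block */
	}
}

int main(void)
{
	unsigned char key[BLK] = "0123456789abcde";
	unsigned char iv0[BLK] = "IVIVIVIVIVIVIVI";
	unsigned char ct[2 * BLK + 1] = "block-one-ctext!block-two-ctext!";
	unsigned char pt_split[2 * BLK], pt_whole[2 * BLK], iv[BLK];

	/* Split processing, as the driver does across work requests:
	 * the IV carried into the second chunk is ct block 0. */
	memcpy(iv, iv0, BLK);
	toy_cbc_decrypt(ct, pt_split, 1, key, iv);
	toy_cbc_decrypt(ct + BLK, pt_split + BLK, 1, key, iv);

	/* Whole-request fallback: must start from the *initial* IV. */
	memcpy(iv, iv0, BLK);
	toy_cbc_decrypt(ct, pt_whole, 2, key, iv);

	printf("results match: %d\n",
	       !memcmp(pt_split, pt_whole, sizeof(pt_whole))); /* prints 1 */
	return 0;
}
```

Handing the whole-request fallback the already-updated IV instead of iv0 would make the comparison print 0; that mismatch is exactly the corruption the self-tests caught.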

4. Fixed a wrong result in ctr-aes encryption. adjust_ctr_overflow()
checks if ((bytes / AES_BLOCK_SIZE) > c), where c is the number of
blocks that can be processed without the IV overflowing, but for a
request with req->nbytes < 32 and not a multiple of 16 this condition
fails and the second block is corrupted, since it requires the
rolled-over IV. Changed the comparison to '>=' to cover this case
(boundary example below).
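
A stand-alone sketch of that boundary case (the arithmetic mirrors the driver's adjust_ctr_overflow(); taking the low 32 counter bits as a plain argument instead of a u8 *iv is a simplification):

```c
#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* Mirrors the driver's arithmetic; ctr_low is the low 32 bits of the
 * counter block. */
static unsigned int adjust_ctr_overflow(uint32_t ctr_low, unsigned int bytes)
{
	uint64_t c = (uint64_t)(~ctr_low) + 1; /* blocks left before 32-bit wrap */

	if ((bytes / AES_BLOCK_SIZE) >= c)     /* the fix: '>' missed equality */
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

int main(void)
{
	/* Counter at 0xffffffff: c == 1, so only one block fits before the
	 * rollover.  A 31-byte request spans two blocks, but 31/16 == 1, so
	 * the old '>' comparison did not clamp, and the partial second block
	 * was encrypted with a stale, un-rolled-over IV. */
	printf("31 bytes -> hardware gets %u bytes\n",
	       adjust_ctr_overflow(0xffffffffu, 31)); /* prints 16 */
	return 0;
}
```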

5. Fixed a wrong result in rfc3686-ctr, again seen when
chcr_cipher_fallback() is called from chcr_handle_cipher_resp().
Here too the initial IV is copied into the new init_iv buffer so that
the fallback case is handled correctly (layout sketch below).
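
For reference, the rfc3686 counter block the driver assembles in reqctx->iv is nonce (4 bytes) || per-request IV (8 bytes) || big-endian block counter starting at 1, per RFC 3686. A minimal sketch of the layout and of the new init_iv copy (local names and fixed example values; only the buffer names come from the driver):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE    8
#define IV                     16

int main(void)
{
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t req_iv[CTR_RFC3686_IV_SIZE]   = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t iv[IV], init_iv[IV];

	/* nonce || per-request IV || big-endian counter = 1 */
	memcpy(iv, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req_iv, CTR_RFC3686_IV_SIZE);
	iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; /* cpu_to_be32(1) */

	/* What the patch adds: keep a pristine copy for a later fallback. */
	memcpy(init_iv, iv, IV);

	for (int i = 0; i < IV; i++)
		printf("%02x", init_iv[i]);
	printf("\n"); /* deadbeef010203040506070800000001 */
	return 0;
}
```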
Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Signed-off-by: Devulapally Shiva Krishna <shiva@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d91a3159
@@ -1054,8 +1054,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 	u32 temp = be32_to_cpu(*--b);
 
 	temp = ~temp;
-	c = (u64)temp + 1; // No of block can processed withou overflow
-	if ((bytes / AES_BLOCK_SIZE) > c)
+	c = (u64)temp + 1; // No of block can processed without overflow
+	if ((bytes / AES_BLOCK_SIZE) >= c)
 		bytes = c * AES_BLOCK_SIZE;
 	return bytes;
 }
@@ -1158,15 +1158,16 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
 static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct chcr_context *ctx = c_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
-	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct sk_buff *skb;
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
-	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
-	struct cipher_wr_param wrparam;
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
+	struct chcr_context *ctx = c_ctx(tfm);
+	struct adapter *adap = padap(ctx->dev);
+	struct cipher_wr_param wrparam;
+	struct sk_buff *skb;
 	int bytes;
 
 	if (err)
@@ -1197,6 +1198,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
+		memcpy(req->iv, reqctx->init_iv, IV);
+		atomic_inc(&adap->chcr_stats.fallback);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 					   req->base.flags,
 					   req->src,
@@ -1248,20 +1251,28 @@ static int process_cipher(struct skcipher_request *req,
 			  struct sk_buff **skb,
 			  unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct adapter *adap = padap(c_ctx(tfm)->dev);
 	struct cipher_wr_param wrparam;
 	int bytes, err = -EINVAL;
+	int subtype;
 
 	reqctx->processed = 0;
 	reqctx->partial_req = 0;
 	if (!req->iv)
 		goto error;
+	subtype = get_cryptoalg_subtype(tfm);
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
 	    (req->cryptlen == 0) ||
 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
+		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
 		       ablkctx->enckey_len, req->cryptlen, ivsize);
 		goto error;
@@ -1302,12 +1313,10 @@ static int process_cipher(struct skcipher_request *req,
 	} else {
 		bytes = req->cryptlen;
 	}
-	if (get_cryptoalg_subtype(tfm) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
 		bytes = adjust_ctr_overflow(req->iv, bytes);
 	}
-	if (get_cryptoalg_subtype(tfm) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
 				CTR_RFC3686_IV_SIZE);
@@ -1315,20 +1324,25 @@ static int process_cipher(struct skcipher_request *req,
 		/* initialize counter portion of counter block */
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+		memcpy(reqctx->init_iv, reqctx->iv, IV);
 
 	} else {
 
 		memcpy(reqctx->iv, req->iv, IV);
+		memcpy(reqctx->init_iv, req->iv, IV);
 	}
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
+fallback:       atomic_inc(&adap->chcr_stats.fallback);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 					   req->base.flags,
 					   req->src,
 					   req->dst,
 					   req->cryptlen,
-					   reqctx->iv,
+					   subtype ==
+					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+					   reqctx->iv : req->iv,
 					   op_type);
 		goto error;
 	}
...
@@ -302,6 +302,7 @@ struct chcr_skcipher_req_ctx {
 	unsigned int op;
 	u16 imm;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+	u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
 	u16 txqidx;
 	u16 rxqidx;
 };