Commit dc6fef2c authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:
   - Zero-length DMA mapping in caam
   - Invalidly mapping stack memory for DMA in talitos
   - Use after free in cavium/nitrox
   - Key parsing in authenc
   - Undefined shift in sm3
   - Bogus completion call in authencesn
   - SHA support detection in caam"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: sm3 - fix undefined shift by >= width of value
  crypto: talitos - fix ablkcipher for CONFIG_VMAP_STACK
  crypto: talitos - reorder code in talitos_edesc_alloc()
  crypto: adiantum - initialize crypto_spawn::inst
  crypto: cavium/nitrox - Use after free in process_response_list()
  crypto: authencesn - Avoid twice completion call in decrypt path
  crypto: caam - fix SHA support detection
  crypto: caam - fix zero-length buffer DMA mapping
  crypto: ccree - convert to use crypto_authenc_extractkeys()
  crypto: bcm - convert to use crypto_authenc_extractkeys()
  crypto: authenc - fix parsing key with misaligned rta_len
parents 6e434bf2 d45a90cb
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	ictx = skcipher_instance_ctx(inst);
 
 	/* Stream cipher, e.g. "xchacha12" */
+	crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+				  skcipher_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
 				   0, crypto_requires_sync(algt->type,
 							   algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
+	crypto_set_spawn(&ictx->blockcipher_spawn,
+			 skcipher_crypto_instance(inst));
 	err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
 				CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
 	if (err)
...
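Context for the adiantum change: in the crypto API of this era, crypto_grab_spawn() took the owning instance from the spawn itself rather than as an argument, so the caller had to set spawn->inst (via crypto_set_spawn() or a type-specific wrapper) before grabbing. Roughly, from memory of crypto/algapi.c at the time (paraphrased; verify against the tree):

```c
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	alg = crypto_find_alg(name, spawn->frontend, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	/* Reads spawn->inst: adiantum left it uninitialized before this
	 * fix, so error/unwind paths dereferenced garbage. */
	err = crypto_init_spawn(spawn, alg, spawn->inst, mask);
	crypto_mod_put(alg);
	return err;
}
```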
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
 		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 		return -EINVAL;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+	/*
+	 * RTA_OK() didn't align the rtattr's payload when validating that it
+	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
+	 * aligned boundary.  To avoid confusion, require that the rtattr
+	 * payload be exactly the param struct, which has a 4-byte aligned size.
+	 */
+	if (RTA_PAYLOAD(rta) != sizeof(*param))
 		return -EINVAL;
+	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
 
 	param = RTA_DATA(rta);
 	keys->enckeylen = be32_to_cpu(param->enckeylen);
 
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	key += rta->rta_len;
+	keylen -= rta->rta_len;
 
 	if (keylen < keys->enckeylen)
 		return -EINVAL;
...
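For reference, the key blob that crypto_authenc_extractkeys() parses is an rtattr header, a 4-byte big-endian enckeylen parameter, the authentication key, then the encryption key; the parser returns pointers into the blob via struct crypto_authenc_keys (authkey/authkeylen, enckey/enckeylen) without copying. A minimal standalone sketch of the layout (struct definitions mirrored here for illustration; the authoritative ones live in linux/rtnetlink.h and crypto/authenc.h):

```c
#include <arpa/inet.h>	/* htonl */
#include <stdint.h>
#include <string.h>

struct rtattr {			/* mirrors linux/rtnetlink.h */
	unsigned short rta_len;
	unsigned short rta_type;
};

struct crypto_authenc_key_param {	/* mirrors crypto/authenc.h */
	uint32_t enckeylen;		/* big-endian on the wire */
};

#define CRYPTO_AUTHENC_KEYA_PARAM 1

/* Build the blob: [rtattr][enckeylen][authkey][enckey]. */
static size_t build_authenc_key(uint8_t *out,
				const uint8_t *authkey, size_t authkeylen,
				const uint8_t *enckey, size_t enckeylen)
{
	struct rtattr *rta = (struct rtattr *)out;
	struct crypto_authenc_key_param *param = (void *)(rta + 1);

	rta->rta_len = sizeof(*rta) + sizeof(*param);	/* 8: 4-byte aligned */
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	param->enckeylen = htonl(enckeylen);

	memcpy(out + rta->rta_len, authkey, authkeylen);
	memcpy(out + rta->rta_len + authkeylen, enckey, enckeylen);
	return rta->rta_len + authkeylen + enckeylen;
}
```

Before the fix, a blob with a deliberately misaligned rta_len made the old `keylen -= RTA_ALIGN(rta->rta_len)` step past the real end of the key and underflow, allowing out-of-bounds reads; pinning the payload to exactly sizeof(*param), whose size is already 4-byte aligned, removes the ambiguity.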
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, int err)
 	struct aead_request *req = areq->data;
 
 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
-	aead_request_complete(req, err);
+	authenc_esn_request_complete(req, err);
 }
 
 static int crypto_authenc_esn_decrypt(struct aead_request *req)
...
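The point of the one-line change: crypto_authenc_esn_decrypt_tail() can itself return -EINPROGRESS, and completing a still-in-flight request here would fire its completion callback a second time when the inner operation finishes. The helper it switches to is already present in crypto/authencesn.c and, from memory (verify against the tree), looks like this:

```c
static void authenc_esn_request_complete(struct aead_request *req, int err)
{
	/* -EINPROGRESS means the inner request will complete later and
	 * invoke the completion itself; completing now would do it twice. */
	if (err != -EINPROGRESS)
		aead_request_complete(req, err);
}
```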
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
 
 	for (i = 0; i <= 63; i++) {
 
-		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
 
 		ss2 = ss1 ^ rol32(a, 12);
...
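Why the mask matters: the loop counter runs to 63, and rotating a 32-bit value by a count of 32 or more is undefined behaviour in C, because the underlying shifts go out of range. A rotate is periodic in the word size, so `i & 31` is mathematically the same result while keeping every shift inside [0, 31]. A minimal standalone illustration of a fully defined rotate:

```c
#include <stdint.h>

/* Rotate left on a 32-bit word, well defined for any count (including 0
 * and >= 32): masking the count keeps both shifts inside [0, 31]. */
static inline uint32_t rol32_any(uint32_t word, unsigned int shift)
{
	shift &= 31;
	return (word << shift) | (word >> ((32 - shift) & 31));
}
```

With the fix, sm3_compress() hands rol32() an already-masked count, so the result is unchanged for i < 32 and becomes defined for i >= 32.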
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
 	depends on ARCH_BCM_IPROC
 	depends on MAILBOX
 	default m
+	select CRYPTO_AUTHENC
 	select CRYPTO_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
...
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	const u8 *origkey = key;
-	const unsigned int origkeylen = keylen;
-
-	int ret = 0;
+	struct crypto_authenc_keys keys;
+	int ret;
 
 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
 		 keylen);
 	flow_dump("  key: ", key, keylen);
 
-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (ret)
 		goto badkey;
 
-	param = RTA_DATA(rta);
-	ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < ctx->enckeylen)
-		goto badkey;
-	if (ctx->enckeylen > MAX_KEY_SIZE)
+	if (keys.enckeylen > MAX_KEY_SIZE ||
+	    keys.authkeylen > MAX_KEY_SIZE)
 		goto badkey;
 
-	ctx->authkeylen = keylen - ctx->enckeylen;
-
-	if (ctx->authkeylen > MAX_KEY_SIZE)
-		goto badkey;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
 
-	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
 	/* May end up padding auth key. So make sure it's zeroed. */
 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
-	memcpy(ctx->authkey, key, ctx->authkeylen);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
 
 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 			u32 tmp[DES_EXPKEY_WORDS];
 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
 
-			if (des_ekey(tmp, key) == 0) {
+			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
 				    CRYPTO_TFM_REQ_WEAK_KEY) {
 					crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)key;
+			const u32 *K = (const u32 *)keys.enckey;
 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
 
 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 		ctx->fallback_cipher->base.crt_flags |=
 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		ret =
-		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
-				       origkeylen);
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
 		if (ret) {
 			flow_log("  fallback setkey() returned:%d\n", ret);
 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
...
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
 		 * Skip algorithms requiring message digests
 		 * if MD or MD size is not supported by device.
 		 */
-		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+		if (is_mdha(c2_alg_sel) &&
 		    (!md_inst || t_alg->aead.maxauthsize > md_limit))
 			continue;
...
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	desc = edesc->hw_desc;
 
-	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, state->buf_dma)) {
-		dev_err(jrdev, "unable to map src\n");
-		goto unmap;
-	}
+	if (buflen) {
+		state->buf_dma = dma_map_single(jrdev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, state->buf_dma)) {
+			dev_err(jrdev, "unable to map src\n");
+			goto unmap;
+		}
 
-	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+	}
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
...
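Background on the caamhash change: ahash_final_no_ctx() can legitimately be reached with buflen == 0 (for example, final() immediately after init()), and a zero-length dma_map_single() is meaningless to the device and is flagged by CONFIG_DMA_API_DEBUG. The guard skips both the mapping and the matching SEQ IN pointer when there is nothing to map. As a generic pattern (hypothetical helper, not from the patch):

```c
#include <linux/dma-mapping.h>

/* Map a buffer only if it is non-empty; dma_map_single() must never see
 * len == 0. Returns 0 on success, leaving *handle unset when len == 0. */
static int map_src_if_nonempty(struct device *dev, void *buf, size_t len,
			       dma_addr_t *handle)
{
	if (!len)
		return 0;	/* no mapping, and no descriptor pointer */
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}
```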
@@ -1155,6 +1155,7 @@
 #define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA		(0x40 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)
...
@@ -7,6 +7,9 @@
 #ifndef CAAM_ERROR_H
 #define CAAM_ERROR_H
 
+#include "desc.h"
+
 #define CAAM_ERROR_STR_MAX 302
 
 void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
 		  int rowsize, int groupsize, struct scatterlist *sg,
 		  size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+	return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+	       OP_ALG_CHA_MDHA;
+}
 #endif /* CAAM_ERROR_H */
...
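How the predicate works: every selector handled by the MDHA (message digest hardware accelerator) shares the 0x40 class in the ALGSEL field, with the low sub-selector bits picking the particular digest, e.g. MD5 = 0x40, SHA-1 = 0x41, SHA-224 = 0x42. Clearing the sub-selector bits and comparing against OP_ALG_CHA_MDHA therefore matches the whole digest class and nothing else; the old open-coded check compared against an unshifted 0x40 and could misfire. A worked check (shift and mask values assumed from desc.h for illustration; verify against the header):

```c
#include <assert.h>
#include <stdint.h>

#define OP_ALG_ALGSEL_SHIFT	16	/* assumed */
#define OP_ALG_ALGSEL_MASK	(0xffU << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SUBMASK	(0x0fU << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_CHA_MDHA		(0x40U << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA256	(0x43U << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_AES	(0x10U << OP_ALG_ALGSEL_SHIFT)

static int is_mdha(uint32_t algtype)
{
	return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
	       OP_ALG_CHA_MDHA;
}

int main(void)
{
	assert(is_mdha(OP_ALG_ALGSEL_SHA256));	/* 0x43 -> class 0x40 */
	assert(!is_mdha(OP_ALG_ALGSEL_AES));	/* 0x10: a cipher, not MDHA */
	return 0;
}
```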
@@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
 		/* ORH error code */
 		err = READ_ONCE(*sr->resp.orh) & 0xff;
-		softreq_destroy(sr);
 
 		if (sr->callback)
 			sr->callback(sr->cb_arg, err);
+		softreq_destroy(sr);
 
 		req_completed++;
 	}
...
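The nitrox bug is a plain ordering use-after-free: the request was destroyed before its completion callback ran, so the callback dereferenced freed memory. An illustrative shape of the corrected pattern (simplified types, not the driver's exact code):

```c
#include <linux/slab.h>

/* The callback dereferences the request, so the request must outlive the
 * callback; freeing first turns every access in the callback into a
 * use-after-free. */
struct soft_request {
	void (*callback)(void *cb_arg, int err);
	void *cb_arg;
};

static void complete_request(struct soft_request *sr, int err)
{
	if (sr->callback)
		sr->callback(sr->cb_arg, err);	/* reads sr: before free */
	kfree(sr);				/* last touch of sr */
}
```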
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			  unsigned int keylen)
 {
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
 	struct cc_crypto_req cc_req = {};
-	struct crypto_authenc_key_param *param;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	int rc = -EINVAL;
 	unsigned int seq_len = 0;
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	const u8 *enckey, *authkey;
+	int rc;
 
 	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	/* STAT_PHASE_0: Init and sanity checks */
 
 	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
-		if (!RTA_OK(rta, keylen))
-			goto badkey;
-		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-			goto badkey;
-		if (RTA_PAYLOAD(rta) < sizeof(*param))
-			goto badkey;
-		param = RTA_DATA(rta);
-		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
-		key += RTA_ALIGN(rta->rta_len);
-		keylen -= RTA_ALIGN(rta->rta_len);
-		if (keylen < ctx->enc_keylen)
+		struct crypto_authenc_keys keys;
+
+		rc = crypto_authenc_extractkeys(&keys, key, keylen);
+		if (rc)
 			goto badkey;
-		ctx->auth_keylen = keylen - ctx->enc_keylen;
+		enckey = keys.enckey;
+		authkey = keys.authkey;
+		ctx->enc_keylen = keys.enckeylen;
+		ctx->auth_keylen = keys.authkeylen;
 
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			/* the nonce is stored in bytes at end of key */
+			rc = -EINVAL;
 			if (ctx->enc_keylen <
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 				goto badkey;
 			/* Copy nonce from last 4 bytes in CTR key to
 			 *  first 4 bytes in CTR IV
 			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
-			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
-			       CTR_RFC3686_NONCE_SIZE);
+			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 		}
 	} else { /* non-authenc - has just one key */
+		enckey = key;
+		authkey = NULL;
 		ctx->enc_keylen = keylen;
 		ctx->auth_keylen = 0;
 	}
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	/* STAT_PHASE_1: Copy key to ctx */
 
 	/* Get key material */
-	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 	if (ctx->enc_keylen == 24)
 		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+		       ctx->auth_keylen);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
 			goto badkey;
 	}
...
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
-	void *err;
 
 	if (cryptlen + authsize > max_len) {
 		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (ivsize)
-		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
 	if (!dst || dst == src) {
 		src_len = assoclen + cryptlen + authsize;
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
 		dst_nents = sg_nents_for_len(dst, dst_len);
 		if (dst_nents < 0) {
 			dev_err(dev, "Invalid number of dst SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	/* if its a ahash, add space for a second desc next to the first one */
 	if (is_sec1 && !dst)
 		alloc_len += sizeof(struct talitos_desc);
+	alloc_len += ivsize;
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
-	if (!edesc) {
-		err = ERR_PTR(-ENOMEM);
-		goto error_sg;
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+	if (ivsize) {
+		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 	}
 
 	memset(&edesc->desc, 0, sizeof(edesc->desc));
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 			     DMA_BIDIRECTIONAL);
 	}
 	return edesc;
-error_sg:
-	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-	return err;
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
...
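The talitos ablkcipher fix addresses IVs that live in a caller's stack frame: with CONFIG_VMAP_STACK the stack is vmalloc-backed, and vmalloc addresses must not be handed to dma_map_single(), which assumes linearly mapped lowmem. Instead of mapping the IV up front, the rework (together with the talitos_edesc_alloc() reordering) reserves ivsize bytes of tail room in the kmalloc'ed, hence DMA-able, edesc and maps the copy; as a bonus, the early error paths can now return directly with nothing to unmap. A condensed sketch of the idea (names simplified, not the driver's exact code):

```c
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* kmalloc memory is linearly mapped and safe for streaming DMA, while the
 * caller's iv may sit on a vmalloc'ed stack: reserve tail room, copy, and
 * map the copy instead. */
static void *alloc_desc_with_iv(struct device *dev, size_t alloc_len,
				u8 *iv, unsigned int ivsize, gfp_t flags,
				dma_addr_t *iv_dma)
{
	u8 *edesc;

	alloc_len += ivsize;		/* tail room for the IV copy */
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);	/* nothing mapped yet */
	if (ivsize) {
		iv = memcpy(edesc + alloc_len - ivsize, iv, ivsize);
		*iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	return edesc;
}
```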