Commit e9b4913a authored by Horia Geantă, committed by Herbert Xu

crypto: caam - fix MDHA key derivation for certain user key lengths

Fuzz testing uncovered an issue when |user key| > |derived key|.
Derived key generation has to be fixed in two cases:

1. Era >= 6 (DKP is available)
DKP cannot be used with immediate input key if |user key| > |derived key|,
since the resulting descriptor (after DKP execution) would be invalid -
having a few bytes from user key left in descriptor buffer
as incorrect opcodes.

Fix DKP usage both in standalone hmac and in authenc algorithms.
For authenc the logic is simplified by always storing both the virtual
and the dma key addresses.

2. Era < 6
The same case (|user key| > |derived key|) fails when DKP
is not available.
Make sure gen_split_key() dma maps max(|user key|, |derived key|),
since this is an in-place (bidirectional) operation.
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Reviewed-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a2fb864c
...@@ -205,6 +205,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -205,6 +205,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
} }
/*
* In case |user key| > |derived key|, using DKP<imm,imm>
* would result in invalid opcodes (last bytes of user key) in
* the resulting descriptor. Use DKP<ptr,imm> instead => both
* virtual and dma key addresses are needed.
*/
ctx->adata.key_virt = ctx->key;
ctx->adata.key_dma = ctx->key_dma;
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
data_len[0] = ctx->adata.keylen_pad; data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen; data_len[1] = ctx->cdata.keylen;
...@@ -221,16 +233,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -221,16 +233,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
...@@ -253,16 +255,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -253,16 +255,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
...@@ -287,16 +279,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -287,16 +279,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
......
...@@ -105,6 +105,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -105,6 +105,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
} }
/*
* In case |user key| > |derived key|, using DKP<imm,imm> would result
* in invalid opcodes (last bytes of user key) in the resulting
* descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
* addresses are needed.
*/
ctx->adata.key_virt = ctx->key;
ctx->adata.key_dma = ctx->key_dma;
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
data_len[0] = ctx->adata.keylen_pad; data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen; data_len[1] = ctx->cdata.keylen;
...@@ -118,16 +130,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -118,16 +130,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
...@@ -143,16 +145,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -143,16 +145,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
...@@ -171,16 +163,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -171,16 +163,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
......
...@@ -199,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -199,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
} }
/*
* In case |user key| > |derived key|, using DKP<imm,imm> would result
* in invalid opcodes (last bytes of user key) in the resulting
* descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
* addresses are needed.
*/
ctx->adata.key_virt = ctx->key;
ctx->adata.key_dma = ctx->key_dma;
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
data_len[0] = ctx->adata.keylen_pad; data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen; data_len[1] = ctx->cdata.keylen;
...@@ -210,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -210,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
...@@ -248,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -248,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
ARRAY_SIZE(data_len)) < 0) ARRAY_SIZE(data_len)) < 0)
return -EINVAL; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key_virt = ctx->key;
else
ctx->adata.key_dma = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
ctx->adata.key_inline = !!(inl_mask & 1); ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2); ctx->cdata.key_inline = !!(inl_mask & 2);
...@@ -2999,6 +2991,7 @@ enum hash_optype { ...@@ -2999,6 +2991,7 @@ enum hash_optype {
/** /**
* caam_hash_ctx - ahash per-session context * caam_hash_ctx - ahash per-session context
* @flc: Flow Contexts array * @flc: Flow Contexts array
* @key: authentication key
* @flc_dma: I/O virtual addresses of the Flow Contexts * @flc_dma: I/O virtual addresses of the Flow Contexts
* @dev: dpseci device * @dev: dpseci device
* @ctx_len: size of Context Register * @ctx_len: size of Context Register
...@@ -3006,6 +2999,7 @@ enum hash_optype { ...@@ -3006,6 +2999,7 @@ enum hash_optype {
*/ */
struct caam_hash_ctx { struct caam_hash_ctx {
struct caam_flc flc[HASH_NUM_OP]; struct caam_flc flc[HASH_NUM_OP];
u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
dma_addr_t flc_dma[HASH_NUM_OP]; dma_addr_t flc_dma[HASH_NUM_OP];
struct device *dev; struct device *dev;
int ctx_len; int ctx_len;
...@@ -3306,6 +3300,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, ...@@ -3306,6 +3300,19 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->adata.key_virt = key; ctx->adata.key_virt = key;
ctx->adata.key_inline = true; ctx->adata.key_inline = true;
/*
* In case |user key| > |derived key|, using DKP<imm,imm> would result
* in invalid opcodes (last bytes of user key) in the resulting
* descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
* addresses are needed.
*/
if (keylen > ctx->adata.keylen_pad) {
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
ctx->adata.keylen_pad,
DMA_TO_DEVICE);
}
ret = ahash_set_sh_desc(ahash); ret = ahash_set_sh_desc(ahash);
kfree(hashed_key); kfree(hashed_key);
return ret; return ret;
...@@ -4536,11 +4543,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ...@@ -4536,11 +4543,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev; ctx->dev = caam_hash->dev;
if (alg->setkey) {
ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
ARRAY_SIZE(ctx->key),
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
dev_err(ctx->dev, "unable to map key\n");
return -ENOMEM;
}
}
dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC); DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->dev, dma_addr)) { if (dma_mapping_error(ctx->dev, dma_addr)) {
dev_err(ctx->dev, "unable to map shared descriptors\n"); dev_err(ctx->dev, "unable to map shared descriptors\n");
if (ctx->adata.key_dma)
dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
ARRAY_SIZE(ctx->key),
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
return -ENOMEM; return -ENOMEM;
} }
...@@ -4566,6 +4589,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) ...@@ -4566,6 +4589,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
if (ctx->adata.key_dma)
dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
} }
static struct caam_hash_alg *caam_hash_alloc(struct device *dev, static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
......
...@@ -96,6 +96,7 @@ struct caam_hash_ctx { ...@@ -96,6 +96,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma; dma_addr_t sh_desc_digest_dma;
enum dma_data_direction dir; enum dma_data_direction dir;
enum dma_data_direction key_dir;
struct device *jrdev; struct device *jrdev;
int ctx_len; int ctx_len;
struct alginfo adata; struct alginfo adata;
...@@ -476,6 +477,18 @@ static int ahash_setkey(struct crypto_ahash *ahash, ...@@ -476,6 +477,18 @@ static int ahash_setkey(struct crypto_ahash *ahash,
goto bad_free_key; goto bad_free_key;
memcpy(ctx->key, key, keylen); memcpy(ctx->key, key, keylen);
/*
* In case |user key| > |derived key|, using DKP<imm,imm>
* would result in invalid opcodes (last bytes of user key) in
* the resulting descriptor. Use DKP<ptr,imm> instead => both
* virtual and dma key addresses are needed.
*/
if (keylen > ctx->adata.keylen_pad)
dma_sync_single_for_device(ctx->jrdev,
ctx->adata.key_dma,
ctx->adata.keylen_pad,
DMA_TO_DEVICE);
} else { } else {
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
keylen, CAAM_MAX_HASH_KEY_SIZE); keylen, CAAM_MAX_HASH_KEY_SIZE);
...@@ -1825,40 +1838,50 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ...@@ -1825,40 +1838,50 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
if (is_xcbc_aes(caam_hash->alg_type)) { if (is_xcbc_aes(caam_hash->alg_type)) {
ctx->dir = DMA_TO_DEVICE; ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_BIDIRECTIONAL;
ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
ctx->ctx_len = 48; ctx->ctx_len = 48;
ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
ARRAY_SIZE(ctx->key),
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
} else if (is_cmac_aes(caam_hash->alg_type)) { } else if (is_cmac_aes(caam_hash->alg_type)) {
ctx->dir = DMA_TO_DEVICE; ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_NONE;
ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
ctx->ctx_len = 32; ctx->ctx_len = 32;
} else { } else {
ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; if (priv->era >= 6) {
ctx->dir = DMA_BIDIRECTIONAL;
ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
} else {
ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_NONE;
}
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
ctx->ctx_len = runninglen[(ctx->adata.algtype & ctx->ctx_len = runninglen[(ctx->adata.algtype &
OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT]; OP_ALG_ALGSEL_SHIFT];
} }
if (ctx->key_dir != DMA_NONE) {
ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
ARRAY_SIZE(ctx->key),
ctx->key_dir,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
}
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx, key), offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) { if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n"); dev_err(ctx->jrdev, "unable to map shared descriptors\n");
if (is_xcbc_aes(caam_hash->alg_type)) if (ctx->key_dir != DMA_NONE)
dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
ARRAY_SIZE(ctx->key), ARRAY_SIZE(ctx->key),
DMA_BIDIRECTIONAL, ctx->key_dir,
DMA_ATTR_SKIP_CPU_SYNC); DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
...@@ -1891,9 +1914,9 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) ...@@ -1891,9 +1914,9 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx, key), offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (is_xcbc_aes(ctx->adata.algtype)) if (ctx->key_dir != DMA_NONE)
dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, ARRAY_SIZE(ctx->key), ctx->key_dir,
DMA_ATTR_SKIP_CPU_SYNC); DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev); caam_jr_free(ctx->jrdev);
} }
......
...@@ -533,14 +533,26 @@ static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) ...@@ -533,14 +533,26 @@ static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
if (adata->key_inline) { if (adata->key_inline) {
int words; int words;
if (adata->keylen > adata->keylen_pad) {
append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | OP_PCL_DKP_SRC_PTR |
adata->keylen); OP_PCL_DKP_DST_IMM | adata->keylen);
append_ptr(desc, adata->key_dma);
words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
CAAM_PTR_SZ) / CAAM_CMD_SZ;
} else {
append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
OP_PCL_DKP_SRC_IMM |
OP_PCL_DKP_DST_IMM | adata->keylen);
append_data(desc, adata->key_virt, adata->keylen); append_data(desc, adata->key_virt, adata->keylen);
/* Reserve space in descriptor buffer for the derived key */
words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; ALIGN(adata->keylen, CAAM_CMD_SZ)) /
CAAM_CMD_SZ;
}
/* Reserve space in descriptor buffer for the derived key */
if (words) if (words)
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
} else { } else {
......
...@@ -48,18 +48,20 @@ int gen_split_key(struct device *jrdev, u8 *key_out, ...@@ -48,18 +48,20 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
u32 *desc; u32 *desc;
struct split_key_result result; struct split_key_result result;
dma_addr_t dma_addr; dma_addr_t dma_addr;
unsigned int local_max;
int ret = -ENOMEM; int ret = -ENOMEM;
adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
adata->keylen_pad = split_key_pad_len(adata->algtype & adata->keylen_pad = split_key_pad_len(adata->algtype &
OP_ALG_ALGSEL_MASK); OP_ALG_ALGSEL_MASK);
local_max = max(keylen, adata->keylen_pad);
dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
adata->keylen, adata->keylen_pad); adata->keylen, adata->keylen_pad);
print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
if (adata->keylen_pad > max_keylen) if (local_max > max_keylen)
return -EINVAL; return -EINVAL;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
...@@ -70,8 +72,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, ...@@ -70,8 +72,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
memcpy(key_out, key_in, keylen); memcpy(key_out, key_in, keylen);
dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad, dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL);
DMA_BIDIRECTIONAL);
if (dma_mapping_error(jrdev, dma_addr)) { if (dma_mapping_error(jrdev, dma_addr)) {
dev_err(jrdev, "unable to map key memory\n"); dev_err(jrdev, "unable to map key memory\n");
goto out_free; goto out_free;
...@@ -117,7 +118,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, ...@@ -117,7 +118,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
adata->keylen_pad, 1); adata->keylen_pad, 1);
} }
dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL);
out_free: out_free:
kfree(desc); kfree(desc);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment