Commit 4cbe79cc authored by Horia Geantă, committed by Herbert Xu

crypto: caam - improve key inlining

For authenc / stitched AEAD algorithms, check independently
each of the two (authentication, encryption) keys whether inlining
is possible.
Prioritize the inlining of the authentication key, since the length
of the (split) key is bigger than that of the encryption key.

For the other algorithms, compute only once per tfm the remaining
available bytes and decide whether key inlining is possible
based on this.
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 488ebc3a
...@@ -224,7 +224,7 @@ struct caam_ctx { ...@@ -224,7 +224,7 @@ struct caam_ctx {
}; };
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
int keys_fit_inline, bool is_rfc3686) bool is_rfc3686)
{ {
u32 *key_jump_cmd; u32 *key_jump_cmd;
unsigned int enckeylen = ctx->cdata.keylen; unsigned int enckeylen = ctx->cdata.keylen;
...@@ -244,18 +244,20 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, ...@@ -244,18 +244,20 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
if (is_rfc3686) if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE; enckeylen -= CTR_RFC3686_NONCE_SIZE;
if (keys_fit_inline) { if (ctx->adata.key_inline)
append_key_as_imm(desc, (void *)ctx->adata.key, append_key_as_imm(desc, (void *)ctx->adata.key,
ctx->adata.keylen_pad, ctx->adata.keylen, ctx->adata.keylen_pad, ctx->adata.keylen,
CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
append_key_as_imm(desc, (void *)ctx->cdata.key, enckeylen, else
enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
} else {
append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 | append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC); KEY_DEST_MDHA_SPLIT | KEY_ENC);
if (ctx->cdata.key_inline)
append_key_as_imm(desc, (void *)ctx->cdata.key, enckeylen,
enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
else
append_key(desc, ctx->cdata.key, enckeylen, CLASS_1 | append_key(desc, ctx->cdata.key, enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG); KEY_DEST_CLASS_REG);
}
/* Load Counter into CONTEXT1 reg */ /* Load Counter into CONTEXT1 reg */
if (is_rfc3686) { if (is_rfc3686) {
...@@ -282,13 +284,14 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ...@@ -282,13 +284,14 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc; u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
/* /*
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
ctx->adata.keylen_pad <= CAAM_DESC_BYTES_MAX) {
ctx->adata.key_inline = true; ctx->adata.key_inline = true;
ctx->adata.key = (uintptr_t)ctx->key; ctx->adata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -368,8 +371,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ...@@ -368,8 +371,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
ctx->adata.keylen_pad <= CAAM_DESC_BYTES_MAX) {
ctx->adata.key_inline = true; ctx->adata.key_inline = true;
ctx->adata.key = (uintptr_t)ctx->key; ctx->adata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -463,10 +465,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -463,10 +465,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
bool keys_fit_inline;
u32 geniv, moveiv; u32 geniv, moveiv;
u32 ctx1_iv_off = 0; u32 ctx1_iv_off = 0;
u32 *desc; u32 *desc;
u32 inl_mask;
unsigned int data_len[2];
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128); OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686; const bool is_rfc3686 = alg->caam.rfc3686;
...@@ -493,6 +496,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -493,6 +496,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
if (is_rfc3686) if (is_rfc3686)
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv) if (alg->caam.geniv)
goto skip_enc; goto skip_enc;
...@@ -500,24 +506,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -500,24 +506,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN + if (desc_inline_query(DESC_AEAD_ENC_LEN +
ctx->adata.keylen_pad + ctx->cdata.keylen + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
CAAM_DESC_BYTES_MAX) { ARRAY_SIZE(data_len)) < 0)
keys_fit_inline = true; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key = (uintptr_t)ctx->key; ctx->adata.key = (uintptr_t)ctx->key;
ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad); else
} else {
keys_fit_inline = false;
ctx->adata.key = ctx->key_dma; ctx->adata.key = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
else
ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad; ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
}
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
/* aead_encrypt shared descriptor */ /* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */ /* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); init_sh_desc_key_aead(desc, ctx, is_rfc3686);
/* Class 2 operation */ /* Class 2 operation */
append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL | append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
...@@ -572,24 +584,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -572,24 +584,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN + if (desc_inline_query(DESC_AEAD_DEC_LEN +
ctx->adata.keylen_pad + ctx->cdata.keylen + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
CAAM_DESC_BYTES_MAX) { ARRAY_SIZE(data_len)) < 0)
keys_fit_inline = true; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key = (uintptr_t)ctx->key; ctx->adata.key = (uintptr_t)ctx->key;
ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad); else
} else {
keys_fit_inline = false;
ctx->adata.key = ctx->key_dma; ctx->adata.key = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
else
ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad; ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
}
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
/* aead_decrypt shared descriptor */ /* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec; desc = ctx->sh_desc_dec;
/* Note: Context registers are saved. */ /* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); init_sh_desc_key_aead(desc, ctx, is_rfc3686);
/* Class 2 operation */ /* Class 2 operation */
append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL | append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
...@@ -660,24 +678,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead) ...@@ -660,24 +678,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
ctx->adata.keylen_pad + ctx->cdata.keylen + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
(is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <= AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
CAAM_DESC_BYTES_MAX) { ARRAY_SIZE(data_len)) < 0)
keys_fit_inline = true; return -EINVAL;
if (inl_mask & 1)
ctx->adata.key = (uintptr_t)ctx->key; ctx->adata.key = (uintptr_t)ctx->key;
ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad); else
} else {
keys_fit_inline = false;
ctx->adata.key = ctx->key_dma; ctx->adata.key = ctx->key_dma;
if (inl_mask & 2)
ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
else
ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad; ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
}
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
/* aead_givencrypt shared descriptor */ /* aead_givencrypt shared descriptor */
desc = ctx->sh_desc_enc; desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */ /* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); init_sh_desc_key_aead(desc, ctx, is_rfc3686);
if (is_rfc3686) if (is_rfc3686)
goto copy_iv; goto copy_iv;
...@@ -787,6 +811,8 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) ...@@ -787,6 +811,8 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
u32 *key_jump_cmd, *zero_payload_jump_cmd, u32 *key_jump_cmd, *zero_payload_jump_cmd,
*zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2; *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
u32 *desc; u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
if (!ctx->cdata.keylen || !ctx->authsize) if (!ctx->cdata.keylen || !ctx->authsize)
return 0; return 0;
...@@ -796,8 +822,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) ...@@ -796,8 +822,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor * Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer * must fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN + if (rem_bytes >= DESC_GCM_ENC_LEN) {
ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
ctx->cdata.key_inline = true; ctx->cdata.key_inline = true;
ctx->cdata.key = (uintptr_t)ctx->key; ctx->cdata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -895,8 +920,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) ...@@ -895,8 +920,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN + if (rem_bytes >= DESC_GCM_DEC_LEN) {
ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
ctx->cdata.key_inline = true; ctx->cdata.key_inline = true;
ctx->cdata.key = (uintptr_t)ctx->key; ctx->cdata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -996,6 +1020,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) ...@@ -996,6 +1020,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
struct device *jrdev = ctx->jrdev; struct device *jrdev = ctx->jrdev;
u32 *key_jump_cmd; u32 *key_jump_cmd;
u32 *desc; u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
if (!ctx->cdata.keylen || !ctx->authsize) if (!ctx->cdata.keylen || !ctx->authsize)
return 0; return 0;
...@@ -1005,8 +1031,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) ...@@ -1005,8 +1031,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor * Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer * must fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
ctx->cdata.key_inline = true; ctx->cdata.key_inline = true;
ctx->cdata.key = (uintptr_t)ctx->key; ctx->cdata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -1084,8 +1109,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) ...@@ -1084,8 +1109,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
ctx->cdata.key_inline = true; ctx->cdata.key_inline = true;
ctx->cdata.key = (uintptr_t)ctx->key; ctx->cdata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -1180,6 +1204,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) ...@@ -1180,6 +1204,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
u32 *key_jump_cmd; u32 *key_jump_cmd;
u32 *read_move_cmd, *write_move_cmd; u32 *read_move_cmd, *write_move_cmd;
u32 *desc; u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
if (!ctx->cdata.keylen || !ctx->authsize) if (!ctx->cdata.keylen || !ctx->authsize)
return 0; return 0;
...@@ -1189,8 +1215,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) ...@@ -1189,8 +1215,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor * Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer * must fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
ctx->cdata.key_inline = true; ctx->cdata.key_inline = true;
ctx->cdata.key = (uintptr_t)ctx->key; ctx->cdata.key = (uintptr_t)ctx->key;
} else { } else {
...@@ -1267,8 +1292,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) ...@@ -1267,8 +1292,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors * Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer * must all fit into the 64-word Descriptor h/w Buffer
*/ */
if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
ctx->cdata.key_inline = true; ctx->cdata.key_inline = true;
ctx->cdata.key = (uintptr_t)ctx->key; ctx->cdata.key = (uintptr_t)ctx->key;
} else { } else {
......
...@@ -449,3 +449,42 @@ struct alginfo { ...@@ -449,3 +449,42 @@ struct alginfo {
u64 key; u64 key;
bool key_inline; bool key_inline;
}; };
/**
 * desc_inline_query() - Provide indications on which data items can be inlined
 *                       and which shall be referenced in a shared descriptor.
 * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
 *               excluding the data items to be inlined (or corresponding
 *               pointer if an item is not inlined). Each cnstr_* function that
 *               generates descriptors should have a define mentioning
 *               corresponding length.
 * @jd_len: Maximum length of the job descriptor(s) that will be used
 *          together with the shared descriptor.
 * @data_len: Array of lengths of the data items trying to be inlined
 * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
 *            otherwise.
 * @count: Number of data items (size of @data_len array); must be <= 32
 *
 * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
 *         check @inl_mask for details.
 */
static inline int desc_inline_query(unsigned int sd_base_len,
				    unsigned int jd_len, unsigned int *data_len,
				    u32 *inl_mask, unsigned int count)
{
	/* Bytes left in the shared descriptor after the fixed commands and
	 * the worst-case job descriptor have been accounted for.
	 */
	int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
	unsigned int i;

	*inl_mask = 0;
	for (i = 0; i < count; i++) {
		/* Item i can be inlined only if, besides its own length,
		 * there is still room for a pointer for each item not yet
		 * examined (worst case: none of them gets inlined).
		 */
		if (rem_bytes - (int)(data_len[i] +
			(count - i - 1) * CAAM_PTR_SZ) >= 0) {
			rem_bytes -= data_len[i];
			/* 1U avoids signed-shift UB when i == 31 */
			*inl_mask |= (1U << i);
		} else {
			/* Item referenced by pointer: charge the pointer cost
			 * even when the budget is already exhausted, so the
			 * final check below reflects the true total. (The old
			 * "rem_bytes > 0" loop guard stopped accounting once
			 * the budget hit exactly zero and could report a fit
			 * that does not exist.)
			 */
			rem_bytes -= CAAM_PTR_SZ;
		}
	}

	return (rem_bytes >= 0) ? 0 : -1;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment