Commit 921715b6 authored by Wenkai Lin, committed by Herbert Xu

crypto: hisilicon/sec - get algorithm bitmap from registers

Add the function 'sec_get_alg_bitmap' to read the hardware algorithm
bitmap before registering algorithms with crypto, instead of deciding
whether to register an algorithm based on the hardware platform's version.
Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent db700974
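
Before the diff: the patch replaces version-based gating (qm->ver > QM_HW_V2) with capability-register gating. The 64-bit driver algorithm bitmap is read as two 32-bit halves (SEC_DRV_ALG_BITMAP_LOW/HIGH), and each algorithm table entry carries the mask bit (alg_msk) that must be set in that bitmap for the algorithm to be registered. The standalone C sketch below illustrates that scheme outside the kernel; the register-read stubs and their return values are hypothetical stand-ins, not driver code.

/*
 * Minimal, standalone sketch of the gating scheme this patch introduces.
 * Illustrative user-space C, not the driver: the read stubs below stand in
 * for the SEC_DRV_ALG_BITMAP_LOW/HIGH reads done via hisi_qm_get_hw_info().
 */
#include <stdint.h>
#include <stdio.h>

#define ALG_BITMAP_SHIFT 32	/* mirrors SEC_ALG_BITMAP_SHIFT */

/* Hypothetical stand-ins for the two 32-bit capability register reads. */
static uint32_t read_drv_alg_bitmap_low(void)  { return 0x18050CBu; }
static uint32_t read_drv_alg_bitmap_high(void) { return 0x395Cu; }

/* Mirrors sec_get_alg_bitmap(): combine high and low halves into a u64. */
static uint64_t get_alg_bitmap(void)
{
	uint64_t high = read_drv_alg_bitmap_high();
	uint64_t low = read_drv_alg_bitmap_low();

	return (high << ALG_BITMAP_SHIFT) | low;
}

/*
 * Each table entry pairs an algorithm with its capability bit, the way
 * struct sec_skcipher / struct sec_aead pair alg_msk with the alg.
 */
struct alg_entry {
	uint64_t alg_msk;
	const char *name;
};

static const struct alg_entry algs[] = {
	{ 1ULL << 0,  "ecb(aes)" },
	{ 1ULL << 12, "cbc(sm4)" },
	{ 1ULL << 43, "authenc(hmac(sha1),cbc(aes))" },
};

int main(void)
{
	uint64_t bitmap = get_alg_bitmap();

	/*
	 * Register only what the bitmap advertises, as sec_register_skcipher()
	 * and sec_register_aead() do with crypto_register_{skcipher,aead}().
	 */
	for (size_t i = 0; i < sizeof(algs) / sizeof(algs[0]); i++) {
		if (algs[i].alg_msk & bitmap)
			printf("register %s\n", algs[i].name);
		else
			printf("skip %s\n", algs[i].name);
	}

	return 0;
}

With the sample values above (low 0x18050CB, high 0x395C, one of the default sets in sec_basic_info), bits 0, 12 and 43 are set, so all three sample entries would be registered.
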
drivers/crypto/hisilicon/sec2/sec.h
@@ -201,10 +201,28 @@ enum sec_cap_type {
 	SEC_RESET_MASK_CAP,
 	SEC_OOO_SHUTDOWN_MASK_CAP,
 	SEC_CE_MASK_CAP,
+	SEC_CLUSTER_NUM_CAP,
+	SEC_CORE_TYPE_NUM_CAP,
+	SEC_CORE_NUM_CAP,
+	SEC_CORES_PER_CLUSTER_NUM_CAP,
+	SEC_CORE_ENABLE_BITMAP,
+	SEC_DRV_ALG_BITMAP_LOW,
+	SEC_DRV_ALG_BITMAP_HIGH,
+	SEC_DEV_ALG_BITMAP_LOW,
+	SEC_DEV_ALG_BITMAP_HIGH,
+	SEC_CORE1_ALG_BITMAP_LOW,
+	SEC_CORE1_ALG_BITMAP_HIGH,
+	SEC_CORE2_ALG_BITMAP_LOW,
+	SEC_CORE2_ALG_BITMAP_HIGH,
+	SEC_CORE3_ALG_BITMAP_LOW,
+	SEC_CORE3_ALG_BITMAP_HIGH,
+	SEC_CORE4_ALG_BITMAP_LOW,
+	SEC_CORE4_ALG_BITMAP_HIGH,
 };
 
 void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
 struct hisi_qp **sec_create_qps(void);
 int sec_register_to_crypto(struct hisi_qm *qm);
 void sec_unregister_from_crypto(struct hisi_qm *qm);
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
 #endif
drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -104,6 +104,16 @@
 #define IV_CTR_INIT		0x1
 #define IV_BYTE_OFFSET		0x8
 
+struct sec_skcipher {
+	u64 alg_msk;
+	struct skcipher_alg alg;
+};
+
+struct sec_aead {
+	u64 alg_msk;
+	struct aead_alg alg;
+};
+
 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
 {
@@ -2158,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
 	.min_keysize = sec_min_key_size,\
 	.max_keysize = sec_max_key_size,\
 	.ivsize = iv_size,\
-},
+}
 
 #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
 	max_key_size, blk_size, iv_size) \
 	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
 	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
 
-static struct skcipher_alg sec_skciphers[] = {
-	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 AES_BLOCK_SIZE, 0)
-
-	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
-			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
-			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
-			 DES3_EDE_BLOCK_SIZE, 0)
-
-	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
-			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
-			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
-			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct skcipher_alg sec_skciphers_v3[] = {
-	SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+static struct sec_skcipher sec_skciphers[] = {
+	{
+		.alg_msk = BIT(0),
+		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+	},
+	{
+		.alg_msk = BIT(1),
+		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(2),
+		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(3),
+		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
+					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(4),
+		.alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(5),
+		.alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(12),
+		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(13),
+		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(14),
+		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
+					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(15),
+		.alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(16),
+		.alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(23),
+		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
+					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+	},
+	{
+		.alg_msk = BIT(24),
+		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
+					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
+					DES3_EDE_BLOCK_SIZE),
+	},
 };
 
 static int aead_iv_demension_check(struct aead_request *aead_req)
@@ -2412,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 	.maxauthsize = max_authsize,\
 }
 
-static struct aead_alg sec_aeads[] = {
-	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
-		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
-		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
-		     AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
-
-	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
-		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
-		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
-		     AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
-
-	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
-		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
-		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
-		     AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
-
-	SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
-
-	SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct aead_alg sec_aeads_v3[] = {
-	SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
-
-	SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+static struct sec_aead sec_aeads[] = {
+	{
+		.alg_msk = BIT(6),
+		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(7),
+		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(17),
+		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(18),
+		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(43),
+		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
+				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+	},
+	{
+		.alg_msk = BIT(44),
+		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
+				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+	},
+	{
+		.alg_msk = BIT(45),
+		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
+				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+	},
+};
+
+static void sec_unregister_skcipher(u64 alg_mask, int end)
+{
+	int i;
+
+	for (i = 0; i < end; i++)
+		if (sec_skciphers[i].alg_msk & alg_mask)
+			crypto_unregister_skcipher(&sec_skciphers[i].alg);
+}
+
+static int sec_register_skcipher(u64 alg_mask)
+{
+	int i, ret, count;
+
+	count = ARRAY_SIZE(sec_skciphers);
+
+	for (i = 0; i < count; i++) {
+		if (!(sec_skciphers[i].alg_msk & alg_mask))
+			continue;
+
+		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	sec_unregister_skcipher(alg_mask, i);
+	return ret;
+}
+
+static void sec_unregister_aead(u64 alg_mask, int end)
+{
+	int i;
+
+	for (i = 0; i < end; i++)
+		if (sec_aeads[i].alg_msk & alg_mask)
+			crypto_unregister_aead(&sec_aeads[i].alg);
+}
+
+static int sec_register_aead(u64 alg_mask)
+{
+	int i, ret, count;
+
+	count = ARRAY_SIZE(sec_aeads);
+
+	for (i = 0; i < count; i++) {
+		if (!(sec_aeads[i].alg_msk & alg_mask))
+			continue;
+
+		ret = crypto_register_aead(&sec_aeads[i].alg);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	sec_unregister_aead(alg_mask, i);
+	return ret;
+}
 
 int sec_register_to_crypto(struct hisi_qm *qm)
 {
+	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
 	int ret;
 
-	/* To avoid repeat register */
-	ret = crypto_register_skciphers(sec_skciphers,
-					ARRAY_SIZE(sec_skciphers));
+	ret = sec_register_skcipher(alg_mask);
 	if (ret)
 		return ret;
 
-	if (qm->ver > QM_HW_V2) {
-		ret = crypto_register_skciphers(sec_skciphers_v3,
-						ARRAY_SIZE(sec_skciphers_v3));
-		if (ret)
-			goto reg_skcipher_fail;
-	}
-
-	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+	ret = sec_register_aead(alg_mask);
 	if (ret)
-		goto reg_aead_fail;
-	if (qm->ver > QM_HW_V2) {
-		ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
-		if (ret)
-			goto reg_aead_v3_fail;
-	}
-	return ret;
+		sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
 
-reg_aead_v3_fail:
-	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
-reg_aead_fail:
-	if (qm->ver > QM_HW_V2)
-		crypto_unregister_skciphers(sec_skciphers_v3,
-					    ARRAY_SIZE(sec_skciphers_v3));
-reg_skcipher_fail:
-	crypto_unregister_skciphers(sec_skciphers,
-				    ARRAY_SIZE(sec_skciphers));
 	return ret;
 }
 
 void sec_unregister_from_crypto(struct hisi_qm *qm)
 {
-	if (qm->ver > QM_HW_V2)
-		crypto_unregister_aeads(sec_aeads_v3,
-					ARRAY_SIZE(sec_aeads_v3));
-	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
 
-	if (qm->ver > QM_HW_V2)
-		crypto_unregister_skciphers(sec_skciphers_v3,
-					    ARRAY_SIZE(sec_skciphers_v3));
-	crypto_unregister_skciphers(sec_skciphers,
-				    ARRAY_SIZE(sec_skciphers));
+	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
 }
drivers/crypto/hisilicon/sec2/sec_main.c
@@ -41,7 +41,6 @@
 #define SEC_ECC_NUM			16
 #define SEC_ECC_MASH			0xFF
 #define SEC_CORE_INT_DISABLE		0x0
-#define SEC_SAA_ENABLE			0x17f
 
 #define SEC_RAS_CE_REG			0x301050
 #define SEC_RAS_FE_REG			0x301054
@@ -114,6 +113,8 @@
 #define SEC_DFX_COMMON1_LEN		0x45
 #define SEC_DFX_COMMON2_LEN		0xBA
 
+#define SEC_ALG_BITMAP_SHIFT		32
+
 struct sec_hw_error {
 	u32 int_msk;
 	const char *msg;
@@ -141,6 +142,23 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
 	{SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
 	{SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
 	{SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
+	{SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
+	{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+	{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+	{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+	{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+	{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+	{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
 };
 
 static const struct sec_hw_error sec_hw_errors[] = {
@@ -345,6 +363,16 @@ struct hisi_qp **sec_create_qps(void)
 	return NULL;
 }
 
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+{
+	u32 cap_val_h, cap_val_l;
+
+	cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+	cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+
+	return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+}
+
 static const struct kernel_param_ops sec_uacce_mode_ops = {
 	.set = uacce_mode_set,
 	.get = param_get_int,
...@@ -512,7 +540,8 @@ static int sec_engine_init(struct hisi_qm *qm) ...@@ -512,7 +540,8 @@ static int sec_engine_init(struct hisi_qm *qm)
writel(SEC_SINGLE_PORT_MAX_TRANS, writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
writel(reg, qm->io_base + SEC_SAA_EN_REG);
if (qm->ver < QM_HW_V3) { if (qm->ver < QM_HW_V3) {
/* HW V2 enable sm4 extra mode, as ctr/ecb */ /* HW V2 enable sm4 extra mode, as ctr/ecb */