Commit e787060b authored by Eric Biggers, committed by Herbert Xu

crypto: x86/aes-xts - wire up VAES + AVX2 implementation

Add an AES-XTS implementation "xts-aes-vaes-avx2" for x86_64 CPUs with
the VAES, VPCLMULQDQ, and AVX2 extensions, but not AVX512 or AVX10.
This implementation uses ymm registers to operate on two AES blocks at a
time.  The assembly code is instantiated using a macro so that most of
the source code is shared with other implementations.
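For illustration only (this sketch is not part of the commit, nor the kernel's actual _aes_xts_crypt macro): the "two AES blocks at a time" behavior comes from VAES lifting the 128-bit AES round instructions to 256-bit ymm operands. Assuming ymm0 holds two independent AES states and %rdi points to a 16-byte round key:

	/* Sketch only: with VAES, one vaesenc performs one AES round
	 * on both 16-byte lanes of a ymm register at once. */
	vbroadcasti128	(%rdi), %ymm1		/* round key copied into both lanes */
	vaesenc		%ymm1, %ymm0, %ymm0	/* one round on two blocks */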

This is the optimal implementation on AMD Zen 3.  It should also be the
optimal implementation on Intel Alder Lake, which similarly supports
VAES but not AVX512.  Compared to xts-aes-aesni-avx on Zen 3,
xts-aes-vaes-avx2 provides 70% higher AES-256-XTS decryption throughput
with 4096-byte messages, or 23% higher with 512-byte messages.

A large improvement is also seen with CPUs that do support AVX512 (e.g.,
98% higher AES-256-XTS decryption throughput on Ice Lake with 4096-byte
messages), though the following patches add AVX512-optimized
implementations to get a bit more performance on those CPUs.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 996f4dcb
@@ -807,3 +807,14 @@ SYM_FUNC_END(aes_xts_encrypt_aesni_avx)
SYM_TYPED_FUNC_START(aes_xts_decrypt_aesni_avx)
	_aes_xts_crypt	0
SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
.set VL, 32
.set USE_AVX10, 0
SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)
	_aes_xts_crypt	1
SYM_FUNC_END(aes_xts_encrypt_vaes_avx2)
SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx2)
	_aes_xts_crypt	0
SYM_FUNC_END(aes_xts_decrypt_vaes_avx2)
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
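(Illustrative aside, not the kernel's code: the 1/0 argument to _aes_xts_crypt above selects the encrypt or decrypt body at assembly time, and the VL and USE_AVX10 symbols select the vector width and instruction encoding, so one macro body yields every function variant. A hypothetical macro of the same shape:)

.macro	toy_aes_round	enc	/* hypothetical; the real macro is far larger */
.if \enc
	vaesenc	%ymm1, %ymm0, %ymm0	/* encryption round */
.else
	vaesdec	%ymm1, %ymm0, %ymm0	/* decryption round */
.endif
.endm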
@@ -1295,6 +1295,9 @@ static struct skcipher_alg aes_xts_alg_##suffix = { \
static struct simd_skcipher_alg *aes_xts_simdalg_##suffix

DEFINE_XTS_ALG(aesni_avx, "xts-aes-aesni-avx", 500);
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
DEFINE_XTS_ALG(vaes_avx2, "xts-aes-vaes-avx2", 600);
#endif

static int __init register_xts_algs(void)
{
@@ -1306,6 +1309,18 @@ static int __init register_xts_algs(void)
					     &aes_xts_simdalg_aesni_avx);
	if (err)
		return err;
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_VAES) ||
	    !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
	    !boot_cpu_has(X86_FEATURE_PCLMULQDQ) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
		return 0;
	err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx2, 1,
					     &aes_xts_simdalg_vaes_avx2);
	if (err)
		return err;
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
	return 0;
}
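(Aside on the priority values: 600 for xts-aes-vaes-avx2 versus 500 for xts-aes-aesni-avx means that when both are registered, the crypto core resolves a generic "xts(aes)" request to the VAES implementation. A minimal hypothetical kernel-side caller, with error handling elided:)

	#include <crypto/skcipher.h>

	/* The core picks the highest-priority registered driver, so on
	 * a VAES+AVX2 CPU this resolves to xts-aes-vaes-avx2. */
	static struct crypto_skcipher *get_xts_tfm(void)
	{
		return crypto_alloc_skcipher("xts(aes)", 0, 0);
	}

The registered drivers and their priorities can be inspected in /proc/crypto.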
@@ -1314,6 +1329,11 @@ static void unregister_xts_algs(void)
	if (aes_xts_simdalg_aesni_avx)
		simd_unregister_skciphers(&aes_xts_alg_aesni_avx, 1,
					  &aes_xts_simdalg_aesni_avx);
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
	if (aes_xts_simdalg_vaes_avx2)
		simd_unregister_skciphers(&aes_xts_alg_vaes_avx2, 1,
					  &aes_xts_simdalg_vaes_avx2);
#endif
}

#else /* CONFIG_X86_64 */
static int __init register_xts_algs(void)
...