Commit 0a6a40c2 authored by Eric Biggers, committed by Herbert Xu

crypto: aes_ti - disable interrupts while accessing S-box

In the "aes-fixed-time" AES implementation, disable interrupts while
accessing the S-box, in order to make cache-timing attacks more
difficult.  Previously it was possible for the CPU to be interrupted
after the S-box had been loaded into the L1 cache, potentially evicting
the cachelines and causing later table lookups to be time-variant.
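
To illustrate the idea (a sketch only, not the patch itself: the table
name, the helper function, and the use of L1_CACHE_BYTES as the prefetch
stride are invented for clarity), the pattern is roughly:

#include <linux/cache.h>	/* L1_CACHE_BYTES */
#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */
#include <linux/types.h>

extern const u8 sbox[256];	/* hypothetical 256-byte S-box */

static void sbox_substitute(u8 *buf, unsigned int len)
{
	unsigned long flags;
	unsigned int i;

	/* With interrupts off, nothing can evict the table mid-block. */
	local_irq_save(flags);

	/*
	 * Touch one byte per cacheline so the whole table is L1-resident;
	 * READ_ONCE() keeps the compiler from discarding the loads.
	 */
	for (i = 0; i < sizeof(sbox); i += L1_CACHE_BYTES)
		(void)READ_ONCE(sbox[i]);

	/* With the table cached, each lookup should hit L1 uniformly. */
	for (i = 0; i < len; i++)
		buf[i] = sbox[buf[i]];

	local_irq_restore(flags);
}

The patch itself brackets the existing prefetch and table lookups in
aesti_encrypt() and aesti_decrypt() with local_irq_save() and
local_irq_restore(), as the diff below shows.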

In tests I did on x86 and ARM, this doesn't affect performance
significantly.  Responsiveness is potentially a concern, but interrupts
are only disabled for a single AES block.

Note that even after this change, the implementation still isn't
necessarily guaranteed to be constant-time; see
https://cr.yp.to/antiforgery/cachetiming-20050414.pdf for a discussion
of the many difficulties involved in writing truly constant-time AES
software.  But it's valuable to make such attacks more difficult.
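
For background on the leak itself: the latency of a secret-indexed load
depends on whether its cacheline is resident, which is what the
prefetch-plus-irq-off bracket above tries to neutralize.  A purely
illustrative userspace-style fragment (x86-only; the names and the bare
rdtsc measurement are invented for clarity):

#include <stdint.h>
#include <x86intrin.h>		/* __rdtsc(); assumes x86 */

static const uint8_t table[256];	/* stand-in for an S-box */

/* Returns the cycle count of one secret-dependent load. */
static uint64_t time_lookup(uint8_t secret_byte)
{
	uint64_t t0 = __rdtsc();
	volatile uint8_t v = table[secret_byte];
	uint64_t t1 = __rdtsc();

	(void)v;
	return t1 - t0;		/* larger if the line was evicted */
}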

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 9f4debe3
@@ -1006,7 +1006,8 @@ config CRYPTO_AES_TI
 	  8 for decryption), this implementation only uses just two S-boxes of
 	  256 bytes each, and attempts to eliminate data dependent latencies by
 	  prefetching the entire table into the cache at the start of each
-	  block.
+	  block. Interrupts are also disabled to avoid races where cachelines
+	  are evicted when the CPU is interrupted to do something else.
 
 config CRYPTO_AES_586
 	tristate "AES cipher algorithms (i586)"
@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	const u32 *rkp = ctx->key_enc + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
 	st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
 	st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	const u32 *rkp = ctx->key_dec + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
 	st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
 	st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static struct crypto_alg aes_alg = {