Commit 60af520c authored by Tadeusz Struk, committed by Herbert Xu

crypto: aesni-intel - fixed problem with packets that are not a multiple of 64 bytes

This patch fixes a problem with packets whose length is not a multiple of 64 bytes.
Signed-off-by: Adrian Hoban <adrian.hoban@intel.com>
Signed-off-by: Aidan O'Mahony <aidan.o.mahony@intel.com>
Signed-off-by: Gabriele Paoloni <gabriele.paoloni@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 16c29daf
@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt:
 	movdqa SHUF_MASK(%rip), %xmm10
 	PSHUFB_XMM %xmm10, %xmm0
 	ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1        # Encrypt(K, Yn)
+
 	sub $16, %r11
 	add %r13, %r11
@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt:
 	# GHASH computation for the last <16 byte block
 	sub %r13, %r11
 	add $16, %r11
-	PSHUFB_XMM %xmm10, %xmm1
+
+	movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
 	# shuffle xmm0 back to output as ciphertext
...
@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm)
 	struct cryptd_aead *cryptd_tfm;
 	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
 		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	struct crypto_aead *cryptd_child;
+	struct aesni_rfc4106_gcm_ctx *child_ctx;
+
 	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
 	if (IS_ERR(cryptd_tfm))
 		return PTR_ERR(cryptd_tfm);
+	cryptd_child = cryptd_aead_child(cryptd_tfm);
+	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
+	memcpy(child_ctx, ctx, sizeof(*ctx));
 	ctx->cryptd_tfm = cryptd_tfm;
 	tfm->crt_aead.reqsize = sizeof(struct aead_request)
 		+ crypto_aead_reqsize(&cryptd_tfm->base);
@@ -923,6 +929,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 	int ret = 0;
 	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+	struct aesni_rfc4106_gcm_ctx *child_ctx =
+		aesni_rfc4106_gcm_ctx_get(cryptd_child);
 	u8 *new_key_mem = NULL;

 	if (key_len < 4) {
@@ -966,6 +975,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 		goto exit;
 	}
 	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+	memcpy(child_ctx, ctx, sizeof(*ctx));
 exit:
 	kfree(new_key_mem);
 	return ret;
@@ -997,7 +1007,6 @@ static int rfc4106_encrypt(struct aead_request *req)
 	int ret;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

 	if (!irq_fpu_usable()) {
 		struct aead_request *cryptd_req =
@@ -1006,6 +1015,7 @@ static int rfc4106_encrypt(struct aead_request *req)
 		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 		return crypto_aead_encrypt(cryptd_req);
 	} else {
+		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 		kernel_fpu_begin();
 		ret = cryptd_child->base.crt_aead.encrypt(req);
 		kernel_fpu_end();
@@ -1018,7 +1028,6 @@ static int rfc4106_decrypt(struct aead_request *req)
 	int ret;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
-	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

 	if (!irq_fpu_usable()) {
 		struct aead_request *cryptd_req =
@@ -1027,6 +1036,7 @@ static int rfc4106_decrypt(struct aead_request *req)
 		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 		return crypto_aead_decrypt(cryptd_req);
 	} else {
+		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
 		kernel_fpu_begin();
 		ret = cryptd_child->base.crt_aead.decrypt(req);
 		kernel_fpu_end();
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment