Commit 37dc7956 authored by Linus Torvalds

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.15:

  API:

   - Disambiguate EBUSY when queueing crypto request by adding ENOSPC.
     This change touches code outside the crypto API.
   - Reset settings when empty string is written to rng_current.

  Algorithms:

   - Add OSCCA SM3 secure hash.

  Drivers:

   - Remove old mv_cesa driver (replaced by marvell/cesa).
   - Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
   - Add ccm/gcm AES in crypto4xx.
   - Add support for BCM7278 in iproc-rng200.
   - Add hash support on Exynos in s5p-sss.
   - Fix fallback-induced error in vmx.
   - Fix output IV in atmel-aes.
   - Fix empty GCM hash in mediatek.

  Others:

   - Fix DoS potential in lib/mpi.
   - Fix potential out-of-order issues with padata"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
  lib/mpi: call cond_resched() from mpi_powm() loop
  crypto: stm32/hash - Fix return issue on update
  crypto: dh - Remove pointless checks for NULL 'p' and 'g'
  crypto: qat - Clean up error handling in qat_dh_set_secret()
  crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
  crypto: dh - Don't permit 'p' to be 0
  crypto: dh - Fix double free of ctx->p
  hwrng: iproc-rng200 - Add support for BCM7278
  dt-bindings: rng: Document BCM7278 RNG200 compatible
  crypto: chcr - Replace _manual_ swap with swap macro
  crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
  hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
  crypto: atmel - remove empty functions
  crypto: ecdh - remove empty exit()
  MAINTAINERS: update maintainer for qat
  crypto: caam - remove unused param of ctx_map_to_sec4_sg()
  crypto: caam - remove unneeded edesc zeroization
  crypto: atmel-aes - Reset the controller before each use
  crypto: atmel-aes - properly set IV after {en,de}crypt
  hwrng: core - Reset user selected rng by writing "" to rng_current
  ...
parents 894025f2 1d9ddde1
@@ -7,59 +7,27 @@ Code Example For Symmetric Key Cipher Operation
 ::

-    struct tcrypt_result {
-        struct completion completion;
-        int err;
-    };
-
     /* tie all data structures together */
     struct skcipher_def {
         struct scatterlist sg;
         struct crypto_skcipher *tfm;
         struct skcipher_request *req;
-        struct tcrypt_result result;
+        struct crypto_wait wait;
     };

-    /* Callback function */
-    static void test_skcipher_cb(struct crypto_async_request *req, int error)
-    {
-        struct tcrypt_result *result = req->data;
-
-        if (error == -EINPROGRESS)
-            return;
-        result->err = error;
-        complete(&result->completion);
-        pr_info("Encryption finished successfully\n");
-    }
-
     /* Perform cipher operation */
     static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
                          int enc)
     {
-        int rc = 0;
+        int rc;

         if (enc)
-            rc = crypto_skcipher_encrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
         else
-            rc = crypto_skcipher_decrypt(sk->req);
+            rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);

-        switch (rc) {
-        case 0:
-            break;
-        case -EINPROGRESS:
-        case -EBUSY:
-            rc = wait_for_completion_interruptible(
-                &sk->result.completion);
-            if (!rc && !sk->result.err) {
-                reinit_completion(&sk->result.completion);
-                break;
-            }
-        default:
-            pr_info("skcipher encrypt returned with %d result %d\n",
-                rc, sk->result.err);
-            break;
-        }
-        init_completion(&sk->result.completion);
+        if (rc)
+            pr_info("skcipher encrypt returned with result %d\n", rc);

         return rc;
     }
@@ -89,8 +57,8 @@ Code Example For Symmetric Key Cipher Operation
     }

     skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  test_skcipher_cb,
-                                  &sk.result);
+                                  crypto_req_done,
+                                  &sk.wait);

     /* AES 256 with random key */
     get_random_bytes(&key, 32);
@@ -122,7 +90,7 @@ Code Example For Symmetric Key Cipher Operation
     /* We encrypt one block */
     sg_init_one(&sk.sg, scratchpad, 16);
     skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
-    init_completion(&sk.result.completion);
+    crypto_init_wait(&sk.wait);

     /* encrypt data */
     ret = test_skcipher_encdec(&sk, 1);
...
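Taken together, the converted documentation example follows a single pattern: register crypto_req_done() as the request callback with a crypto_wait as its context, then fold the invocation into crypto_wait_req(). A consolidated sketch of the new-style flow, assembled from the fragments above rather than quoted from the final document (error paths trimmed to the essentials):

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>
    #include <linux/random.h>
    #include <linux/scatterlist.h>

    static int demo_encrypt_one_block(void)
    {
        struct crypto_skcipher *tfm;
        struct skcipher_request *req = NULL;
        struct crypto_wait wait;
        struct scatterlist sg;
        u8 key[32], iv[16], block[16] = {};
        int err;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
            err = -ENOMEM;
            goto out;
        }

        get_random_bytes(key, sizeof(key));
        get_random_bytes(iv, sizeof(iv));
        err = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (err)
            goto out;

        /* One AES block. Real code should use heap buffers as in the
         * full example: DMA-capable drivers may not accept stack memory
         * behind a scatterlist. */
        sg_init_one(&sg, block, sizeof(block));
        crypto_init_wait(&wait);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, sizeof(block), iv);

        /* Runs synchronously or asynchronously; crypto_wait_req()
         * hides the difference and returns the final status. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    out:
        skcipher_request_free(req);
        crypto_free_skcipher(tfm);
        return err;
    }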
 HWRNG support for the iproc-rng200 driver

 Required properties:
-- compatible : "brcm,iproc-rng200"
+- compatible : Must be one of:
+    "brcm,bcm7278-rng200"
+    "brcm,iproc-rng200"
 - reg : base address and size of control register block

 Example:
...
@@ -5484,7 +5484,7 @@ F: include/uapi/linux/fb.h
 FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
 M: Horia Geantă <horia.geanta@nxp.com>
-M: Dan Douglass <dan.douglass@nxp.com>
+M: Aymen Sghaier <aymen.sghaier@nxp.com>
 L: linux-crypto@vger.kernel.org
 S: Maintained
 F: drivers/crypto/caam/
@@ -11060,7 +11060,6 @@ F: drivers/mtd/nand/pxa3xx_nand.c
 QAT DRIVER
 M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
-M: Salvatore Benedetto <salvatore.benedetto@intel.com>
 L: qat-linux@intel.com
 S: Supported
 F: drivers/crypto/qat/
@@ -11793,7 +11792,7 @@ L: linux-crypto@vger.kernel.org
 L: linux-samsung-soc@vger.kernel.org
 S: Maintained
 F: drivers/crypto/exynos-rng.c
-F: Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt

 SAMSUNG FRAMEBUFFER DRIVER
 M: Jingoo Han <jingoohan1@gmail.com>
...
@@ -140,6 +140,6 @@ CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
@@ -279,6 +279,6 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
@@ -163,5 +163,5 @@ CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRC_T10DIF=y
@@ -28,6 +28,7 @@
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
+#include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
@@ -1067,9 +1068,10 @@ static struct skcipher_alg aesni_skciphers[] = {
     }
 };

+static
 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

-struct {
+static struct {
     const char *algname;
     const char *drvname;
     const char *basename;
@@ -1131,7 +1133,7 @@ static struct aead_alg aesni_aead_algs[] = { {
     .setauthsize = common_rfc4106_set_authsize,
     .encrypt = helper_rfc4106_encrypt,
     .decrypt = helper_rfc4106_decrypt,
-    .ivsize = 8,
+    .ivsize = GCM_RFC4106_IV_SIZE,
     .maxauthsize = 16,
     .base = {
         .cra_name = "__gcm-aes-aesni",
@@ -1149,7 +1151,7 @@ static struct aead_alg aesni_aead_algs[] = { {
     .setauthsize = rfc4106_set_authsize,
     .encrypt = rfc4106_encrypt,
     .decrypt = rfc4106_decrypt,
-    .ivsize = 8,
+    .ivsize = GCM_RFC4106_IV_SIZE,
     .maxauthsize = 16,
     .base = {
         .cra_name = "rfc4106(gcm(aes))",
@@ -1165,7 +1167,7 @@ static struct aead_alg aesni_aead_algs[] = { {
     .setauthsize = generic_gcmaes_set_authsize,
     .encrypt = generic_gcmaes_encrypt,
     .decrypt = generic_gcmaes_decrypt,
-    .ivsize = 12,
+    .ivsize = GCM_AES_IV_SIZE,
     .maxauthsize = 16,
     .base = {
         .cra_name = "gcm(aes)",
...
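The magic numbers being replaced here come from <crypto/gcm.h>, a header new in this series. Its values are fully determined by the substitutions visible in this hunk and in the crypto/gcm.c hunks below (.ivsize 12 -> GCM_AES_IV_SIZE, 8 -> GCM_RFC4106_IV_SIZE and GCM_RFC4543_IV_SIZE), so the header boils down to the following sketch (a reconstruction, not a quotation):

    /* include/crypto/gcm.h (reconstruction) */
    #ifndef _CRYPTO_GCM_H
    #define _CRYPTO_GCM_H

    #define GCM_AES_IV_SIZE     12  /* gcm(aes): 96-bit nonce */
    #define GCM_RFC4106_IV_SIZE 8   /* rfc4106: explicit 64-bit IV */
    #define GCM_RFC4543_IV_SIZE 8   /* rfc4543 (GMAC) */

    #endif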
@@ -41,6 +41,7 @@
 #include <asm/inst.h>

+.section .rodata
 .align 16
 /*
  * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
@@ -111,19 +112,13 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
     pxor    CONSTANT, %xmm1
     sub     $0x40, LEN
     add     $0x40, BUF
-#ifndef __x86_64__
-    /* This is for position independent code(-fPIC) support for 32bit */
-    call    delta
-delta:
-    pop     %ecx
-#endif
     cmp     $0x40, LEN
     jb      less_64

 #ifdef __x86_64__
     movdqa .Lconstant_R2R1(%rip), CONSTANT
 #else
-    movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
+    movdqa .Lconstant_R2R1, CONSTANT
 #endif

 loop_64:/* 64 bytes Full cache line folding */
@@ -172,7 +167,7 @@ less_64:/* Folding cache line into 128bit */
 #ifdef __x86_64__
     movdqa .Lconstant_R4R3(%rip), CONSTANT
 #else
-    movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT
+    movdqa .Lconstant_R4R3, CONSTANT
 #endif
     prefetchnta (BUF)
@@ -220,8 +215,8 @@ fold_64:
     movdqa  .Lconstant_R5(%rip), CONSTANT
     movdqa  .Lconstant_mask32(%rip), %xmm3
 #else
-    movdqa  .Lconstant_R5 - delta(%ecx), CONSTANT
-    movdqa  .Lconstant_mask32 - delta(%ecx), %xmm3
+    movdqa  .Lconstant_R5, CONSTANT
+    movdqa  .Lconstant_mask32, %xmm3
 #endif
     psrldq  $0x04, %xmm2
     pand    %xmm3, %xmm1
@@ -232,7 +227,7 @@ fold_64:
 #ifdef __x86_64__
     movdqa  .Lconstant_RUpoly(%rip), CONSTANT
 #else
-    movdqa  .Lconstant_RUpoly - delta(%ecx), CONSTANT
+    movdqa  .Lconstant_RUpoly, CONSTANT
 #endif
     movdqa  %xmm1, %xmm2
     pand    %xmm3, %xmm1
...
@@ -860,6 +860,17 @@ config CRYPTO_SHA3
       References:
       http://keccak.noekeon.org/

+config CRYPTO_SM3
+    tristate "SM3 digest algorithm"
+    select CRYPTO_HASH
+    help
+      SM3 secure hash function as defined by OSCCA GM/T 0004-2012 (SM3).
+      It is part of the Chinese Commercial Cryptography suite.
+
+      References:
+      http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+      https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+
 config CRYPTO_TGR192
     tristate "Tiger digest algorithms"
     select CRYPTO_HASH
...
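With the new option enabled, the digest is reachable through the regular hash API under the algorithm name "sm3" (SM3 produces a 256-bit digest). A minimal synchronous sketch using the shash interface; the snippet is illustrative and not part of this merge:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int sm3_digest_buf(const u8 *buf, unsigned int len, u8 out[32])
    {
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sm3", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            desc->flags = 0;    /* callers may not sleep */
            err = crypto_shash_digest(desc, buf, len, out);
            shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return err;
    }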
@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
...
@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 }
 EXPORT_SYMBOL_GPL(af_alg_cmsg_send);

-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
-    switch (err) {
-    case -EINPROGRESS:
-    case -EBUSY:
-        wait_for_completion(&completion->completion);
-        reinit_completion(&completion->completion);
-        err = completion->err;
-        break;
-    };
-
-    return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
-    struct af_alg_completion *completion = req->data;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    completion->err = err;
-    complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
  *
...
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
         return err;

     err = op(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && (ahash_request_flags(req) &
-                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return err;

     ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
     req->base.complete = ahash_def_finup_done2;

     err = crypto_ahash_reqtfm(req)->final(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && (ahash_request_flags(req) &
-                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return err;

 out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
         return err;

     err = tfm->update(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && (ahash_request_flags(req) &
-                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return err;

     return ahash_def_finup_finish1(req, err);
...
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
     int err = -EINPROGRESS;

     if (unlikely(queue->qlen >= queue->max_qlen)) {
-        err = -EBUSY;
-        if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+        if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+            err = -ENOSPC;
             goto out;
+        }
+        err = -EBUSY;
         if (queue->backlog == &queue->list)
             queue->backlog = &request->list;
     }
...
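This hunk is the heart of the EBUSY/ENOSPC disambiguation called out in the merge description: -EBUSY now always means "the request was placed on the backlog and will be processed", while a full queue without CRYPTO_TFM_REQ_MAY_BACKLOG is rejected outright with the new -ENOSPC. A sketch of how a dispatcher built on crypto_enqueue_request() reads the codes after this change (kick_worker() is a hypothetical stand-in for something like queue_work(); compare the cryptd hunk further down):

    static int dispatch(struct crypto_queue *queue,
                        struct crypto_async_request *req)
    {
        int err = crypto_enqueue_request(queue, req);

        if (err == -ENOSPC)     /* full queue, backlog not allowed: */
            return err;         /* nothing was queued, don't kick */

        /* -EINPROGRESS (queued) or -EBUSY (queued on the backlog) */
        kick_worker();
        return err;
    }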
@@ -122,7 +122,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
     int notnum = 0;

     name = ++p;
-    len = 0;

     for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
         notnum |= !isdigit(*p);
...
@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
         /* Synchronous operation */
         aead_request_set_callback(&areq->cra_u.aead_req,
                                   CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  af_alg_complete, &ctx->completion);
-        err = af_alg_wait_for_completion(ctx->enc ?
+                                  crypto_req_done, &ctx->wait);
+        err = crypto_wait_req(ctx->enc ?
                 crypto_aead_encrypt(&areq->cra_u.aead_req) :
                 crypto_aead_decrypt(&areq->cra_u.aead_req),
-                &ctx->completion);
+                &ctx->wait);
     }

     /* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
     ctx->merge = 0;
     ctx->enc = 0;
     ctx->aead_assoclen = 0;
-    af_alg_init_completion(&ctx->completion);
+    crypto_init_wait(&ctx->wait);

     ask->private = ctx;
...
@@ -26,7 +26,7 @@ struct hash_ctx {
     u8 *result;

-    struct af_alg_completion completion;
+    struct crypto_wait wait;

     unsigned int len;
     bool more;
@@ -88,8 +88,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
         if ((msg->msg_flags & MSG_MORE))
             hash_free_result(sk, ctx);

-        err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
-                                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
         if (err)
             goto unlock;
     }
@@ -110,8 +109,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
         ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);

-        err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
-                                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+                              &ctx->wait);
         af_alg_free_sg(&ctx->sgl);
         if (err)
             goto unlock;
@@ -129,8 +128,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
             goto unlock;

         ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-                                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+                              &ctx->wait);
     }

 unlock:
@@ -171,7 +170,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
     } else {
         if (!ctx->more) {
             err = crypto_ahash_init(&ctx->req);
-            err = af_alg_wait_for_completion(err, &ctx->completion);
+            err = crypto_wait_req(err, &ctx->wait);
             if (err)
                 goto unlock;
         }
@@ -179,7 +178,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
         err = crypto_ahash_update(&ctx->req);
     }

-    err = af_alg_wait_for_completion(err, &ctx->completion);
+    err = crypto_wait_req(err, &ctx->wait);
     if (err)
         goto unlock;
@@ -215,17 +214,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
     ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);

     if (!result && !ctx->more) {
-        err = af_alg_wait_for_completion(
-                crypto_ahash_init(&ctx->req),
-                &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+                              &ctx->wait);
         if (err)
             goto unlock;
     }

     if (!result || ctx->more) {
         ctx->more = 0;
-        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
-                                         &ctx->completion);
+        err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+                              &ctx->wait);
         if (err)
             goto unlock;
     }
@@ -476,13 +474,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
     ctx->result = NULL;
     ctx->len = len;
     ctx->more = 0;
-    af_alg_init_completion(&ctx->completion);
+    crypto_init_wait(&ctx->wait);

     ask->private = ctx;

     ahash_request_set_tfm(&ctx->req, hash);
     ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                               af_alg_complete, &ctx->completion);
+                               crypto_req_done, &ctx->wait);

     sk->sk_destruct = hash_sock_destruct;
...
@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
         skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                                       CRYPTO_TFM_REQ_MAY_SLEEP |
                                       CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                      af_alg_complete,
-                                      &ctx->completion);
-        err = af_alg_wait_for_completion(ctx->enc ?
+                                      crypto_req_done, &ctx->wait);
+        err = crypto_wait_req(ctx->enc ?
             crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
             crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
-                                         &ctx->completion);
+                                         &ctx->wait);
     }

     /* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
     ctx->more = 0;
     ctx->merge = 0;
     ctx->enc = 0;
-    af_alg_init_completion(&ctx->completion);
+    crypto_init_wait(&ctx->wait);

     ask->private = ctx;
...
@@ -24,6 +24,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/completion.h>
 #include "internal.h"

 LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_alg);

+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+    struct crypto_wait *wait = req->data;
+
+    if (err == -EINPROGRESS)
+        return;
+
+    wait->err = err;
+    complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
 MODULE_DESCRIPTION("Cryptographic core API");
 MODULE_LICENSE("GPL");
...
@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
     public_key_signature_free(payload3);
 }

-struct public_key_completion {
-    struct completion completion;
-    int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
-    struct public_key_completion *compl = req->data;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    compl->err = err;
-    complete(&compl->completion);
-}
-
 /*
  * Verify a signature using a public key.
  */
 int public_key_verify_signature(const struct public_key *pkey,
                                 const struct public_key_signature *sig)
 {
-    struct public_key_completion compl;
+    struct crypto_wait cwait;
     struct crypto_akcipher *tfm;
     struct akcipher_request *req;
     struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
     sg_init_one(&digest_sg, output, outlen);
     akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
                                outlen);
-    init_completion(&compl.completion);
+    crypto_init_wait(&cwait);
     akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
-                                  public_key_verify_done, &compl);
+                                  crypto_req_done, &cwait);

     /* Perform the verification calculation.  This doesn't actually do the
      * verification, but rather calculates the hash expected by the
      * signature and returns that to us.
      */
-    ret = crypto_akcipher_verify(req);
-    if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
-        wait_for_completion(&compl.completion);
-        ret = compl.err;
-    }
+    ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
     if (ret < 0)
         goto out_free_output;
...
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
     int cpu, err;
     struct cryptd_cpu_queue *cpu_queue;
     atomic_t *refcnt;
-    bool may_backlog;

     cpu = get_cpu();
     cpu_queue = this_cpu_ptr(queue->cpu_queue);
     err = crypto_enqueue_request(&cpu_queue->queue, request);

     refcnt = crypto_tfm_ctx(request->tfm);
-    may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

-    if (err == -EBUSY && !may_backlog)
+    if (err == -ENOSPC)
         goto out_put_cpu;

     queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
...
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
         goto out;

     err = cts_cbc_encrypt(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return;

 out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
         goto out;

     err = cts_cbc_decrypt(req);
-    if (err == -EINPROGRESS ||
-        (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+    if (err == -EINPROGRESS || err == -EBUSY)
         return;

 out:
...
@@ -21,19 +21,12 @@ struct dh_ctx {
     MPI xa;
 };

-static inline void dh_clear_params(struct dh_ctx *ctx)
+static void dh_clear_ctx(struct dh_ctx *ctx)
 {
     mpi_free(ctx->p);
     mpi_free(ctx->g);
-    ctx->p = NULL;
-    ctx->g = NULL;
-}
-
-static void dh_free_ctx(struct dh_ctx *ctx)
-{
-    dh_clear_params(ctx);
     mpi_free(ctx->xa);
-    ctx->xa = NULL;
+    memset(ctx, 0, sizeof(*ctx));
 }

 /*
@@ -60,9 +53,6 @@ static int dh_check_params_length(unsigned int p_len)
 static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 {
-    if (unlikely(!params->p || !params->g))
-        return -EINVAL;
-
     if (dh_check_params_length(params->p_size << 3))
         return -EINVAL;
@@ -71,10 +61,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
         return -EINVAL;

     ctx->g = mpi_read_raw_data(params->g, params->g_size);
-    if (!ctx->g) {
-        mpi_free(ctx->p);
+    if (!ctx->g)
         return -EINVAL;
-    }

     return 0;
 }
@@ -86,21 +74,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
     struct dh params;

     /* Free the old MPI key if any */
-    dh_free_ctx(ctx);
+    dh_clear_ctx(ctx);

     if (crypto_dh_decode_key(buf, len, &params) < 0)
-        return -EINVAL;
+        goto err_clear_ctx;

     if (dh_set_params(ctx, &params) < 0)
-        return -EINVAL;
+        goto err_clear_ctx;

     ctx->xa = mpi_read_raw_data(params.key, params.key_size);
-    if (!ctx->xa) {
-        dh_clear_params(ctx);
-        return -EINVAL;
-    }
+    if (!ctx->xa)
+        goto err_clear_ctx;

     return 0;
+
+err_clear_ctx:
+    dh_clear_ctx(ctx);
+    return -EINVAL;
 }

 static int dh_compute_value(struct kpp_request *req)
@@ -158,7 +148,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
 {
     struct dh_ctx *ctx = dh_get_ctx(tfm);

-    dh_free_ctx(ctx);
+    dh_clear_ctx(ctx);
 }

 static struct kpp_alg dh = {
...
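The dh_clear_ctx() consolidation is what closes the "Fix double free of ctx->p" entry in the shortlog. A comment-only sketch of the sequence the old code permitted (caller-side view, names as in the hunk above):

    /*
     * dh_set_secret(tfm, bad_buf, len)
     *   dh_free_ctx(ctx)                  // frees old p, g, xa
     *   dh_set_params(ctx, &params)
     *     ctx->p = mpi_read_raw_data(..)  // allocated
     *     ctx->g = mpi_read_raw_data(..)  // fails...
     *     mpi_free(ctx->p)                // ...p freed on the error path,
     *     return -EINVAL                  // but ctx->p still points at it
     * dh_set_secret(tfm, good_buf, len)
     *   dh_free_ctx(ctx)                  // double free of ctx->p
     *
     * dh_clear_ctx() ends with memset(ctx, 0, sizeof(*ctx)), so every
     * failure path now leaves NULL pointers behind, and mpi_free(NULL)
     * is a no-op: clearing twice in a row is harmless.
     */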
@@ -28,12 +28,12 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
     return src + size;
 }

-static inline int dh_data_size(const struct dh *p)
+static inline unsigned int dh_data_size(const struct dh *p)
 {
     return p->key_size + p->p_size + p->g_size;
 }

-int crypto_dh_key_len(const struct dh *p)
+unsigned int crypto_dh_key_len(const struct dh *p)
 {
     return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
 }
@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
     if (secret.len != crypto_dh_key_len(params))
         return -EINVAL;

+    /*
+     * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
+     * some drivers assume otherwise.
+     */
+    if (params->key_size > params->p_size ||
+        params->g_size > params->p_size)
+        return -EINVAL;
+
     /* Don't allocate memory. Set pointers to data within
      * the given buffer
      */
@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
     params->p = (void *)(ptr + params->key_size);
     params->g = (void *)(ptr + params->key_size + params->p_size);

+    /*
+     * Don't permit 'p' to be 0.  It's not a prime number, and it's subject
+     * to corner cases such as 'mod 0' being undefined or
+     * crypto_kpp_maxsize() returning 0.
+     */
+    if (memchr_inv(params->p, 0, params->p_size) == NULL)
+        return -EINVAL;
+
     return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
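For context, crypto_dh_decode_key() parses a flat buffer in which the three byte strings simply follow a small header, which is why the two pointer assignments at the end of this hunk are plain offset arithmetic, and why bounding key_size and g_size by p_size is enough to keep drivers from overrunning 'p'. A layout sketch (the header carries the lengths; field names are illustrative):

    /*
     *  +------------+-----------+-----------+-----------+
     *  | secret hdr |    key    |     p     |     g     |
     *  +------------+-----------+-----------+-----------+
     *               ^ptr
     *                           ^ptr + key_size
     *                                       ^ptr + key_size + p_size
     *
     * hence:
     *   params->p = (void *)(ptr + params->key_size);
     *   params->g = (void *)(ptr + params->key_size + params->p_size);
     */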
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
     return 0;
 }

-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
-    struct drbg_state *drbg = req->data;
-
-    if (error == -EINPROGRESS)
-        return;
-    drbg->ctr_async_err = error;
-    complete(&drbg->ctr_completion);
-}
-
 static int drbg_init_sym_kernel(struct drbg_state *drbg)
 {
     struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
         return PTR_ERR(sk_tfm);
     }
     drbg->ctr_handle = sk_tfm;
-    init_completion(&drbg->ctr_completion);
+    crypto_init_wait(&drbg->ctr_wait);

     req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
     if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
         return -ENOMEM;
     }
     drbg->ctr_req = req;
-    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  drbg_skcipher_cb, drbg);
+    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                                  CRYPTO_TFM_REQ_MAY_SLEEP,
+                                  crypto_req_done, &drbg->ctr_wait);

     alignmask = crypto_skcipher_alignmask(sk_tfm);
     drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
         /* Output buffer may not be valid for SGL, use scratchpad */
         skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
                                    cryptlen, drbg->V);
-        ret = crypto_skcipher_encrypt(drbg->ctr_req);
-        switch (ret) {
-        case 0:
-            break;
-        case -EINPROGRESS:
-        case -EBUSY:
-            wait_for_completion(&drbg->ctr_completion);
-            if (!drbg->ctr_async_err) {
-                reinit_completion(&drbg->ctr_completion);
-                break;
-            }
-        default:
+        ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+                              &drbg->ctr_wait);
+        if (ret)
             goto out;
-        }
-        init_completion(&drbg->ctr_completion);
+
+        crypto_init_wait(&drbg->ctr_wait);

         memcpy(outbuf, drbg->outscratchpad, cryptlen);
...
@@ -131,17 +131,11 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
     return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
 }

-static void no_exit_tfm(struct crypto_kpp *tfm)
-{
-    return;
-}
-
 static struct kpp_alg ecdh = {
     .set_secret = ecdh_set_secret,
     .generate_public_key = ecdh_compute_value,
     .compute_shared_secret = ecdh_compute_value,
     .max_size = ecdh_max_size,
-    .exit = no_exit_tfm,
     .base = {
         .cra_name = "ecdh",
         .cra_driver_name = "ecdh-generic",
...
@@ -28,7 +28,7 @@ static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
     return src + sz;
 }

-int crypto_ecdh_key_len(const struct ecdh *params)
+unsigned int crypto_ecdh_key_len(const struct ecdh *params)
 {
     return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
 }
...
@@ -14,9 +14,9 @@
 #include <crypto/internal/hash.h>
 #include <crypto/null.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
 #include <crypto/hash.h>
 #include "internal.h"
-#include <linux/completion.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -78,11 +78,6 @@ struct crypto_gcm_req_priv_ctx {
     } u;
 };

-struct crypto_gcm_setkey_result {
-    int err;
-    struct completion completion;
-};
-
 static struct {
     u8 buf[16];
     struct scatterlist sg;
@@ -98,17 +93,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
     return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
 }

-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
-    struct crypto_gcm_setkey_result *result = req->data;
-
-    if (err == -EINPROGRESS)
-        return;
-
-    result->err = err;
-    complete(&result->completion);
-}
-
 static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
                              unsigned int keylen)
 {
@@ -119,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
         be128 hash;
         u8 iv[16];

-        struct crypto_gcm_setkey_result result;
+        struct crypto_wait wait;

         struct scatterlist sg[1];
         struct skcipher_request req;
@@ -140,21 +124,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
     if (!data)
         return -ENOMEM;

-    init_completion(&data->result.completion);
+    crypto_init_wait(&data->wait);
     sg_init_one(data->sg, &data->hash, sizeof(data->hash));
     skcipher_request_set_tfm(&data->req, ctr);
     skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                               CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_gcm_setkey_done,
-                                  &data->result);
+                                  crypto_req_done,
+                                  &data->wait);
     skcipher_request_set_crypt(&data->req, data->sg, data->sg,
                                sizeof(data->hash), data->iv);

-    err = crypto_skcipher_encrypt(&data->req);
-    if (err == -EINPROGRESS || err == -EBUSY) {
-        wait_for_completion(&data->result.completion);
-        err = data->result.err;
-    }
+    err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+                          &data->wait);

     if (err)
         goto out;
@@ -197,8 +178,8 @@ static void crypto_gcm_init_common(struct aead_request *req)
     struct scatterlist *sg;

     memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
-    memcpy(pctx->iv, req->iv, 12);
-    memcpy(pctx->iv + 12, &counter, 4);
+    memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
+    memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);

     sg_init_table(pctx->src, 3);
     sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
@@ -695,7 +676,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
     inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
                                    ctr->base.cra_alignmask;
     inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
-    inst->alg.ivsize = 12;
+    inst->alg.ivsize = GCM_AES_IV_SIZE;
     inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
     inst->alg.maxauthsize = 16;
     inst->alg.init = crypto_gcm_init_tfm;
@@ -832,20 +813,20 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
     u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
                        crypto_aead_alignmask(child) + 1);

-    scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+    scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);

     memcpy(iv, ctx->nonce, 4);
     memcpy(iv + 4, req->iv, 8);

     sg_init_table(rctx->src, 3);
-    sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+    sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
     sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
     if (sg != rctx->src + 1)
         sg_chain(rctx->src, 2, sg);

     if (req->src != req->dst) {
         sg_init_table(rctx->dst, 3);
-        sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+        sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
         sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
         if (sg != rctx->dst + 1)
             sg_chain(rctx->dst, 2, sg);
@@ -957,7 +938,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
     err = -EINVAL;

     /* Underlying IV size must be 12. */
-    if (crypto_aead_alg_ivsize(alg) != 12)
+    if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
         goto out_drop_alg;

     /* Not a stream cipher? */
@@ -980,7 +961,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
     inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);

-    inst->alg.ivsize = 8;
+    inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
     inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
     inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -1134,7 +1115,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
         tfm,
         sizeof(struct crypto_rfc4543_req_ctx) +
         ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-        align + 12);
+        align + GCM_AES_IV_SIZE);

     return 0;
@@ -1199,7 +1180,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
     err = -EINVAL;

     /* Underlying IV size must be 12. */
-    if (crypto_aead_alg_ivsize(alg) != 12)
+    if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
         goto out_drop_alg;

     /* Not a stream cipher? */
@@ -1222,7 +1203,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
     inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);

-    inst->alg.ivsize = 8;
+    inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
     inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
     inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
...
@@ -156,6 +156,19 @@ static void gf128mul_x8_bbe(be128 *x)
     x->b = cpu_to_be64((b << 8) ^ _tt);
 }

+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+    u64 a = le64_to_cpu(x->a);
+    u64 b = le64_to_cpu(x->b);
+
+    /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+    u64 _tt = gf128mul_table_be[a >> 56];
+
+    r->a = cpu_to_le64((a << 8) | (b >> 56));
+    r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
 void gf128mul_lle(be128 *r, const be128 *b)
 {
     be128 p[8];
...
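gf128mul_x8_ble() is the byte-at-a-time counterpart of the existing single-bit helper gf128mul_x_ble() from <crypto/gf128mul.h>: it shifts the 128-bit element left by eight bit positions and folds the byte that falls out back in through the reduction table. A hedged self-check sketch, assuming the in-tree single-bit helper; one multiplication by x^8 must equal eight multiplications by x:

    #include <crypto/gf128mul.h>

    static bool gf128mul_x8_ble_matches(const le128 *x)
    {
        le128 by_bits = *x, by_byte, tmp;
        int i;

        for (i = 0; i < 8; i++) {
            gf128mul_x_ble(&tmp, &by_bits);   /* multiply by x */
            by_bits = tmp;
        }

        gf128mul_x8_ble(&by_byte, x);         /* multiply by x^8 */

        return by_bits.a == by_byte.a && by_bits.b == by_byte.b;
    }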
@@ -93,18 +93,10 @@ struct crypto_kw_ctx {
 struct crypto_kw_block {
 #define SEMIBSIZE 8
-    u8 A[SEMIBSIZE];
-    u8 R[SEMIBSIZE];
+    __be64 A;
+    __be64 R;
 };

-/* convert 64 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
-{
-    __be64 *a = (__be64 *)buf;
-
-    *a = cpu_to_be64(val);
-}
-
 /*
  * Fast forward the SGL to the "end" length minus SEMIBSIZE.
  * The start in the SGL defined by the fast-forward is returned with
@@ -139,17 +131,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
     struct crypto_blkcipher *tfm = desc->tfm;
     struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
     struct crypto_cipher *child = ctx->child;
-
-    unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
-                                    crypto_cipher_alignmask(child));
-    unsigned int i;
-
-    u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
-    struct crypto_kw_block *block = (struct crypto_kw_block *)
-                                    PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
-    u64 t = 6 * ((nbytes) >> 3);
+    struct crypto_kw_block block;
     struct scatterlist *lsrc, *ldst;
+    u64 t = 6 * ((nbytes) >> 3);
+    unsigned int i;
     int ret = 0;

     /*
@@ -160,7 +145,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
         return -EINVAL;

     /* Place the IV into block A */
-    memcpy(block->A, desc->info, SEMIBSIZE);
+    memcpy(&block.A, desc->info, SEMIBSIZE);

     /*
      * src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -171,32 +156,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
     ldst = dst;

     for (i = 0; i < 6; i++) {
-        u8 tbe_buffer[SEMIBSIZE + alignmask];
-        /* alignment for the crypto_xor and the _to_be64 operation */
-        u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
-        unsigned int tmp_nbytes = nbytes;
         struct scatter_walk src_walk, dst_walk;
+        unsigned int tmp_nbytes = nbytes;

         while (tmp_nbytes) {
             /* move pointer by tmp_nbytes in the SGL */
             crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
             /* get the source block */
-            scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
                                    false);

-            /* perform KW operation: get counter as byte string */
-            crypto_kw_cpu_to_be64(t, tbe);
             /* perform KW operation: modify IV with counter */
-            crypto_xor(block->A, tbe, SEMIBSIZE);
+            block.A ^= cpu_to_be64(t);
             t--;
+
             /* perform KW operation: decrypt block */
-            crypto_cipher_decrypt_one(child, (u8*)block,
-                                      (u8*)block);
+            crypto_cipher_decrypt_one(child, (u8 *)&block,
+                                      (u8 *)&block);

             /* move pointer by tmp_nbytes in the SGL */
             crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
             /* Copy block->R into place */
-            scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
                                    true);

             tmp_nbytes -= SEMIBSIZE;
@@ -208,11 +188,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
     }

     /* Perform authentication check */
-    if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
-                      SEMIBSIZE))
+    if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
         ret = -EBADMSG;

-    memzero_explicit(block, sizeof(struct crypto_kw_block));
+    memzero_explicit(&block, sizeof(struct crypto_kw_block));

     return ret;
 }
@@ -224,17 +203,10 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
     struct crypto_blkcipher *tfm = desc->tfm;
     struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
     struct crypto_cipher *child = ctx->child;
-
-    unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
-                                    crypto_cipher_alignmask(child));
-    unsigned int i;
-
-    u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
-    struct crypto_kw_block *block = (struct crypto_kw_block *)
-                                    PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
-    u64 t = 1;
+    struct crypto_kw_block block;
     struct scatterlist *lsrc, *ldst;
+    u64 t = 1;
+    unsigned int i;

     /*
      * Require at least 2 semiblocks (note, the 3rd semiblock that is
@@ -249,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
      * Place the predefined IV into block A -- for encrypt, the caller
      * does not need to provide an IV, but he needs to fetch the final IV.
      */
-    memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);
+    block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);

     /*
      * src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -260,30 +232,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
     ldst = dst;

     for (i = 0; i < 6; i++) {
-        u8 tbe_buffer[SEMIBSIZE + alignmask];
-        u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
-        unsigned int tmp_nbytes = nbytes;
         struct scatter_walk src_walk, dst_walk;
+        unsigned int tmp_nbytes = nbytes;

         scatterwalk_start(&src_walk, lsrc);
         scatterwalk_start(&dst_walk, ldst);

         while (tmp_nbytes) {
             /* get the source block */
-            scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
                                    false);

             /* perform KW operation: encrypt block */
-            crypto_cipher_encrypt_one(child, (u8 *)block,
-                                      (u8 *)block);
+            crypto_cipher_encrypt_one(child, (u8 *)&block,
+                                      (u8 *)&block);

-            /* perform KW operation: get counter as byte string */
-            crypto_kw_cpu_to_be64(t, tbe);
             /* perform KW operation: modify IV with counter */
-            crypto_xor(block->A, tbe, SEMIBSIZE);
+            block.A ^= cpu_to_be64(t);
             t++;

             /* Copy block->R into place */
-            scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+            scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
                                    true);

             tmp_nbytes -= SEMIBSIZE;
@@ -295,9 +263,9 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
     }

     /* establish the IV for the caller to pick up */
-    memcpy(desc->info, block->A, SEMIBSIZE);
+    memcpy(desc->info, &block.A, SEMIBSIZE);

-    memzero_explicit(block, sizeof(struct crypto_kw_block));
+    memzero_explicit(&block, sizeof(struct crypto_kw_block));

     return 0;
 }
...
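The keywrap rewrite is mostly a change of representation: with A held as a __be64 scalar instead of a byte array, the per-step "XOR the counter into A" from RFC 3394 collapses into one integer operation and the alignment scaffolding disappears. The identity being relied on, as a comment sketch:

    /*
     * For u64 t and __be64 A,
     *
     *     A ^= cpu_to_be64(t);
     *
     * is bytewise identical to the old sequence
     *
     *     u8 tbe[SEMIBSIZE];
     *     crypto_kw_cpu_to_be64(t, tbe);        // big-endian encode t
     *     crypto_xor((u8 *)&A, tbe, SEMIBSIZE);
     *
     * because XOR operates independently on each byte, so XORing the
     * big-endian encodings equals big-endian encoding the XOR.  The same
     * reasoning turns the final "\xA6..." comparison into a single
     * integer compare against cpu_to_be64(0xa6a6a6a6a6a6a6a6).
     */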
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_encrypt(subreq) ?:
 		      post_crypt(req);

-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY &&
-	     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 }
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_decrypt(subreq) ?:
 		      post_crypt(req);

-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY &&
-	     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 }
@@ -610,9 +606,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 		ecb_name[len - 1] = 0;

 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
-			return -ENAMETOOLONG;
+			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_drop_spawn;
+		}
+	} else
+		goto err_drop_spawn;

 	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = alg->base.cra_priority;
...
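The create() fix above is the classic goto-cleanup conversion: the early return on an overlong name leaked the spawn grabbed earlier, so the failure is now routed through the existing err_drop_spawn label. A small self-contained sketch of the idiom, with resource_get()/resource_put() as hypothetical stand-ins for crypto_grab_spawn()/crypto_drop_spawn():

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-ins for crypto_grab_spawn()/crypto_drop_spawn() */
static int resource_get(void) { return 0; }
static void resource_put(void) { }

static int create_name(const char *ecb_name, char *out, size_t outlen)
{
	int err = resource_get();

	if (err)
		return err;

	if (snprintf(out, outlen, "lrw(%s)", ecb_name) >= (int)outlen) {
		err = -ENAMETOOLONG;
		goto err_drop;	/* release what we grabbed, don't just return */
	}
	return 0;

err_drop:
	resource_put();
	return err;
}

int main(void)
{
	char buf[16];

	printf("%d\n", create_name("aes", buf, sizeof(buf)));	/* 0 */
	printf("%d\n", create_name("averylongname1234", buf, sizeof(buf)));
	return 0;
}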
@@ -213,8 +213,6 @@ static void rmd128_transform(u32 *state, const __le32 *in)
 	state[2] = state[3] + aa + bbb;
 	state[3] = state[0] + bb + ccc;
 	state[0] = ddd;
-
-	return;
 }

 static int rmd128_init(struct shash_desc *desc)
...
@@ -256,8 +256,6 @@ static void rmd160_transform(u32 *state, const __le32 *in)
 	state[3] = state[4] + aa + bbb;
 	state[4] = state[0] + bb + ccc;
 	state[0] = ddd;
-
-	return;
 }

 static int rmd160_init(struct shash_desc *desc)
...
@@ -228,8 +228,6 @@ static void rmd256_transform(u32 *state, const __le32 *in)
 	state[5] += bbb;
 	state[6] += ccc;
 	state[7] += ddd;
-
-	return;
 }

 static int rmd256_init(struct shash_desc *desc)
...
@@ -275,8 +275,6 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	state[7] += ccc;
 	state[8] += ddd;
 	state[9] += eee;
-
-	return;
 }

 static int rmd320_init(struct shash_desc *desc)
...
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 				   req->dst, ctx->key_size - 1, req->dst_len);

 	err = crypto_akcipher_encrypt(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);

 	return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 			    ctx->key_size);

 	err = crypto_akcipher_decrypt(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_decrypt_complete(req, err);

 	return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 				   req->dst, ctx->key_size - 1, req->dst_len);

 	err = crypto_akcipher_sign(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);

 	return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 			    ctx->key_size);

 	err = crypto_akcipher_verify(&req_ctx->child_req);
-	if (err != -EINPROGRESS &&
-	    (err != -EBUSY ||
-	     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_verify_complete(req, err);

 	return err;
...
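All four pkcs1pad call sites above shrink to the same two-value test, which is the point of the ENOSPC disambiguation mentioned in the pull description: -EINPROGRESS and -EBUSY now both mean the request was accepted and the completion callback will run (the latter via the backlog), while a full queue without CRYPTO_TFM_REQ_MAY_BACKLOG surfaces as -ENOSPC. A hedged kernel-style sketch of the resulting caller pattern (submit_one() is a hypothetical helper, not part of this series):

/*
 * Hedged sketch of the post-series convention. Callers that own the
 * completion callback no longer need to re-test MAY_BACKLOG here.
 */
#include <crypto/akcipher.h>

static int submit_one(struct akcipher_request *req)
{
	int err = crypto_akcipher_encrypt(req);

	if (err == -EINPROGRESS || err == -EBUSY)
		return err;	/* in flight; the callback completes it */

	return err;		/* 0 on synchronous success, or a real error
				 * such as -ENOSPC (queue full, no backlog) */
}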
/*
* SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
* described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Written by Gilad Ben-Yossef <gilad@benyossef.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
};
EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static inline u32 p0(u32 x)
{
return x ^ rol32(x, 9) ^ rol32(x, 17);
}
static inline u32 p1(u32 x)
{
return x ^ rol32(x, 15) ^ rol32(x, 23);
}
static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)
{
return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
}
static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
{
return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
}
static inline u32 t(unsigned int n)
{
return (n < 16) ? SM3_T1 : SM3_T2;
}
static void sm3_expand(u32 *t, u32 *w, u32 *wt)
{
int i;
unsigned int tmp;
/* load the input */
for (i = 0; i <= 15; i++)
w[i] = get_unaligned_be32((__u32 *)t + i);
for (i = 16; i <= 67; i++) {
tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
}
for (i = 0; i <= 63; i++)
wt[i] = w[i] ^ w[i + 4];
}
static void sm3_compress(u32 *w, u32 *wt, u32 *m)
{
u32 ss1;
u32 ss2;
u32 tt1;
u32 tt2;
u32 a, b, c, d, e, f, g, h;
int i;
a = m[0];
b = m[1];
c = m[2];
d = m[3];
e = m[4];
f = m[5];
g = m[6];
h = m[7];
for (i = 0; i <= 63; i++) {
ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
ss2 = ss1 ^ rol32(a, 12);
tt1 = ff(i, a, b, c) + d + ss2 + *wt;
wt++;
tt2 = gg(i, e, f, g) + h + ss1 + *w;
w++;
d = c;
c = rol32(b, 9);
b = a;
a = tt1;
h = g;
g = rol32(f, 19);
f = e;
e = p0(tt2);
}
m[0] = a ^ m[0];
m[1] = b ^ m[1];
m[2] = c ^ m[2];
m[3] = d ^ m[3];
m[4] = e ^ m[4];
m[5] = f ^ m[5];
m[6] = g ^ m[6];
m[7] = h ^ m[7];
a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
}
static void sm3_transform(struct sm3_state *sst, u8 const *src)
{
unsigned int w[68];
unsigned int wt[64];
sm3_expand((u32 *)src, w, wt);
sm3_compress(w, wt, sst->state);
memzero_explicit(w, sizeof(w));
memzero_explicit(wt, sizeof(wt));
}
static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src,
int blocks)
{
while (blocks--) {
sm3_transform(sst, src);
src += SM3_BLOCK_SIZE;
}
}
int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
}
EXPORT_SYMBOL(crypto_sm3_update);
static int sm3_final(struct shash_desc *desc, u8 *out)
{
sm3_base_do_finalize(desc, sm3_generic_block_fn);
return sm3_base_finish(desc, out);
}
int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
return sm3_final(desc, hash);
}
EXPORT_SYMBOL(crypto_sm3_finup);
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
.final = sm3_final,
.finup = crypto_sm3_finup,
.descsize = sizeof(struct sm3_state),
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sm3_generic_mod_init(void)
{
return crypto_register_shash(&sm3_alg);
}
static void __exit sm3_generic_mod_fini(void)
{
crypto_unregister_shash(&sm3_alg);
}
module_init(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sm3");
MODULE_ALIAS_CRYPTO("sm3-generic");
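With sm3-generic registered, any kernel user can drive it through the synchronous hash (shash) API. A hedged sketch of such a caller follows; sm3_digest_example() is a hypothetical helper, not part of this commit, and error handling is kept minimal:

#include <crypto/hash.h>
#include <crypto/sm3.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sm3_digest_example(const u8 *data, unsigned int len,
			      u8 out[SM3_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sm3", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* shash_desc is followed by the algorithm's private state */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, data, len, out);

	kzfree(desc);
	crypto_free_shash(tfm);
	return err;
}

For the "abc" input, out should match the A.1 Example 1 digest in the test vectors below (66 c7 f0 f4 ... 8f 4b a8 e0).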
@@ -1497,6 +1497,73 @@ static const struct hash_testvec crct10dif_tv_template[] = {
 	}
 };
/* Example vectors below taken from
* http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
*
* The rest taken from
* https://github.com/adamws/oscca-sm3
*/
static const struct hash_testvec sm3_tv_template[] = {
{
.plaintext = "",
.psize = 0,
.digest = (u8 *)(u8 []) {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B }
}, {
.plaintext = "a",
.psize = 1,
.digest = (u8 *)(u8 []) {
0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29,
0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C,
0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82,
0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 }
}, {
/* A.1. Example 1 */
.plaintext = "abc",
.psize = 3,
.digest = (u8 *)(u8 []) {
0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9,
0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2,
0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2,
0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 }
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = (u8 *)(u8 []) {
0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC,
0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4,
0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63,
0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 }
}, {
/* A.1. Example 2 */
.plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab"
"cdabcdabcdabcdabcd",
.psize = 64,
.digest = (u8 *)(u8 []) {
0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1,
0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D,
0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65,
0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 }
}, {
.plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
"abcdabcdabcdabcdabcdabcdabcdabcd",
.psize = 256,
.digest = (u8 *)(u8 []) {
0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91,
0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF,
0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9,
0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 }
}
};
 /*
  * SHA1 test vectors from FIPS PUB 180-1
  * Long vector from CAVS 5.0
...
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_encrypt(subreq) ?:
 		      post_crypt(req);

-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY &&
-	     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 }
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
 		      crypto_skcipher_decrypt(subreq) ?:
 		      post_crypt(req);

-	if (err == -EINPROGRESS ||
-	    (err == -EBUSY &&
-	     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+	if (err == -EINPROGRESS || err == -EBUSY)
 		return err;
 }
...
@@ -100,12 +100,12 @@ config HW_RANDOM_BCM2835
 	  If unsure, say Y.

 config HW_RANDOM_IPROC_RNG200
-	tristate "Broadcom iProc RNG200 support"
-	depends on ARCH_BCM_IPROC
+	tristate "Broadcom iProc/STB RNG200 support"
+	depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the RNG200
-	  hardware found on the Broadcom iProc SoCs.
+	  hardware found on the Broadcom iProc and STB SoCs.

 	  To compile this driver as a module, choose M here: the
 	  module will be called iproc-rng200
...
@@ -292,26 +292,48 @@ static struct miscdevice rng_miscdev = {
 	.groups		= rng_dev_groups,
 };

+static int enable_best_rng(void)
+{
+	int ret = -ENODEV;
+
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+
+	/* rng_list is sorted by quality, use the best (=first) one */
+	if (!list_empty(&rng_list)) {
+		struct hwrng *new_rng;
+
+		new_rng = list_entry(rng_list.next, struct hwrng, list);
+		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+		if (!ret)
+			cur_rng_set_by_user = 0;
+	}
+
+	return ret;
+}
+
 static ssize_t hwrng_attr_current_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t len)
 {
-	int err;
+	int err = -ENODEV;
 	struct hwrng *rng;

 	err = mutex_lock_interruptible(&rng_mutex);
 	if (err)
 		return -ERESTARTSYS;
-	err = -ENODEV;
-	list_for_each_entry(rng, &rng_list, list) {
-		if (sysfs_streq(rng->name, buf)) {
-			err = 0;
-			cur_rng_set_by_user = 1;
-			if (rng != current_rng)
-				err = set_current_rng(rng);
-			break;
+
+	if (sysfs_streq(buf, "")) {
+		err = enable_best_rng();
+	} else {
+		list_for_each_entry(rng, &rng_list, list) {
+			if (sysfs_streq(rng->name, buf)) {
+				cur_rng_set_by_user = 1;
+				err = set_current_rng(rng);
+				break;
+			}
 		}
 	}
+
 	mutex_unlock(&rng_mutex);

 	return err ? : len;
@@ -423,7 +445,7 @@ static void start_khwrngd(void)
 {
 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
 	if (IS_ERR(hwrng_fill)) {
-		pr_err("hwrng_fill thread creation failed");
+		pr_err("hwrng_fill thread creation failed\n");
 		hwrng_fill = NULL;
 	}
 }
@@ -493,17 +515,8 @@ void hwrng_unregister(struct hwrng *rng)
 	mutex_lock(&rng_mutex);

 	list_del(&rng->list);
-	if (current_rng == rng) {
-		drop_current_rng();
-		cur_rng_set_by_user = 0;
-		/* rng_list is sorted by quality, use the best (=first) one */
-		if (!list_empty(&rng_list)) {
-			struct hwrng *new_rng;
-
-			new_rng = list_entry(rng_list.next, struct hwrng, list);
-			set_current_rng(new_rng);
-		}
-	}
+	if (current_rng == rng)
+		enable_best_rng();

 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
...
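The new empty-string case in hwrng_attr_current_store() gives userspace a way to drop a manual selection and fall back to the best-quality device. A hedged userspace sketch follows; the sysfs path is the usual location but may differ per system, and a lone newline is treated as the empty string by sysfs_streq():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/misc/hw_random/rng_current", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* writing "" (here: a bare newline) re-selects the best RNG */
	if (write(fd, "\n", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}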
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
 }

 static const struct of_device_id iproc_rng200_of_match[] = {
+	{ .compatible = "brcm,bcm7278-rng200", },
 	{ .compatible = "brcm,iproc-rng200", },
 	{},
 };
...
@@ -72,7 +72,7 @@ static int pseries_rng_remove(struct vio_dev *dev)
 	return 0;
 }

-static struct vio_device_id pseries_rng_driver_ids[] = {
+static const struct vio_device_id pseries_rng_driver_ids[] = {
 	{ "ibm,random-v1", "ibm,random"},
 	{ "", "" }
 };
...
@@ -52,13 +52,6 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
 	int retval = 0;
 	int period_us = ktime_to_us(priv->period);

-	/*
-	 * The RNG provides 32-bits per read. Ensure there is enough space for
-	 * at minimum one read.
-	 */
-	if (max < sizeof(u32))
-		return 0;
-
 	/*
 	 * There may not have been enough time for new data to be generated
 	 * since the last request. If the caller doesn't want to wait, let them
...
@@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev)
 static int virtrng_restore(struct virtio_device *vdev)
 {
-	return probe_common(vdev);
+	int err;
+
+	err = probe_common(vdev);
+	if (!err) {
+		struct virtrng_info *vi = vdev->priv;
+
+		/*
+		 * Set hwrng_removed to ensure that virtio_read()
+		 * does not block waiting for data before the
+		 * registration is complete.
+		 */
+		vi->hwrng_removed = true;
+		err = hwrng_register(&vi->hwrng);
+		if (!err) {
+			vi->hwrng_register_done = true;
+			vi->hwrng_removed = false;
+		}
+	}
+
+	return err;
 }
 #endif
...
@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390
 	  It is available with IBM z13 or later.

-config CRYPTO_DEV_MV_CESA
-	tristate "Marvell's Cryptographic Engine"
-	depends on PLAT_ORION
-	select CRYPTO_AES
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_HASH
-	select SRAM
-	help
-	  This driver allows you to utilize the Cryptographic Engines and
-	  Security Accelerator (CESA) which can be found on the Marvell Orion
-	  and Kirkwood SoCs, such as QNAP's TS-209.
-
-	  Currently the driver supports AES in ECB and CBC mode without DMA.
-
 config CRYPTO_DEV_MARVELL_CESA
-	tristate "New Marvell's Cryptographic Engine driver"
+	tristate "Marvell's Cryptographic Engine driver"
 	depends on PLAT_ORION || ARCH_MVEBU
 	select CRYPTO_AES
 	select CRYPTO_DES
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA
 	select SRAM
 	help
 	  This driver allows you to utilize the Cryptographic Engines and
-	  Security Accelerator (CESA) which can be found on the Armada 370.
+	  Security Accelerator (CESA) which can be found on MVEBU and ORION
+	  platforms.
 	  This driver supports CPU offload through DMA transfers.

-	  This driver is aimed at replacing the mv_cesa driver. This will only
-	  happen once it has received proper testing.
-
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
 	select CRYPTO_DES
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX
 	tristate "Driver AMCC PPC4xx crypto accelerator"
 	depends on PPC && 4xx
 	select CRYPTO_HASH
+	select CRYPTO_AEAD
+	select CRYPTO_AES
+	select CRYPTO_CCM
+	select CRYPTO_GCM
 	select CRYPTO_BLKCIPHER
 	help
 	  This option allows you to have support for AMCC crypto acceleration.
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P
 	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.

+config CRYPTO_DEV_EXYNOS_HASH
+	bool "Support for Samsung Exynos HASH accelerator"
+	depends on CRYPTO_DEV_S5P
+	depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	select CRYPTO_SHA256
+	help
+	  Select this to offload Exynos from HASH MD5/SHA1/SHA256.
+	  This will select software SHA1, MD5 and SHA256 as they are
+	  needed for small and zero-size messages.
+	  HASH algorithms will be disabled if EXYNOS_RNG
+	  is enabled due to hw conflict.
+
 config CRYPTO_DEV_NX
 	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
 	depends on PPC64
...
@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
...
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
 crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
@@ -22,7 +22,11 @@
 #ifndef __CRYPTO4XX_CORE_H__
 #define __CRYPTO4XX_CORE_H__

+#include <linux/ratelimit.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"

 #define MODULE_NAME "crypto4xx"
@@ -34,20 +38,28 @@
 #define PPC405EX_CE_RESET			0x00000008

 #define CRYPTO4XX_CRYPTO_PRIORITY		300
-#define PPC4XX_LAST_PD				63
-#define PPC4XX_NUM_PD				64
-#define PPC4XX_LAST_GD				1023
+#define PPC4XX_NUM_PD				256
+#define PPC4XX_LAST_PD				(PPC4XX_NUM_PD - 1)
 #define PPC4XX_NUM_GD				1024
-#define PPC4XX_LAST_SD				63
-#define PPC4XX_NUM_SD				64
+#define PPC4XX_LAST_GD				(PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD				256
+#define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
 #define PPC4XX_SD_BUFFER_SIZE			2048

-#define PD_ENTRY_INUSE				1
+#define PD_ENTRY_BUSY				BIT(1)
+#define PD_ENTRY_INUSE				BIT(0)
 #define PD_ENTRY_FREE				0
 #define ERING_WAS_FULL				0xffffffff

 struct crypto4xx_device;

+union shadow_sa_buf {
+	struct dynamic_sa_ctl sa;
+
+	/* alloc 256 bytes which is enough for any kind of dynamic sa */
+	u8 buf[256];
+} __packed;
+
 struct pd_uinfo {
 	struct crypto4xx_device *dev;
 	u32 state;
@@ -60,9 +72,8 @@ struct pd_uinfo {
 				used by this packet */
 	u32 num_sd;		/* number of scatter discriptors
 				used by this packet */
-	void *sa_va;		/* shadow sa, when using cp from ctx->sa */
-	u32 sa_pa;
-	void *sr_va;		/* state record for shadow sa */
+	struct dynamic_sa_ctl *sa_va;	/* shadow sa */
+	struct sa_state_record *sr_va;	/* state record for shadow sa */
 	u32 sr_pa;
 	struct scatterlist *dest_va;
 	struct crypto_async_request *async_req;	/* base crypto request
@@ -72,27 +83,21 @@ struct pd_uinfo {
 struct crypto4xx_device {
 	struct crypto4xx_core_device *core_dev;
 	char *name;
-	u64  ce_phy_address;
 	void __iomem *ce_base;
 	void __iomem *trng_base;

-	void *pdr;			/* base address of packet
-					descriptor ring */
-	dma_addr_t pdr_pa;		/* physical address used to
-					program ce pdr_base_register */
-	void *gdr;			/* gather descriptor ring */
-	dma_addr_t gdr_pa;		/* physical address used to
-					program ce gdr_base_register */
-	void *sdr;			/* scatter descriptor ring */
-	dma_addr_t sdr_pa;		/* physical address used to
-					program ce sdr_base_register */
+	struct ce_pd *pdr;	/* base address of packet descriptor ring */
+	dma_addr_t pdr_pa;	/* physical address of pdr_base_register */
+	struct ce_gd *gdr;	/* gather descriptor ring */
+	dma_addr_t gdr_pa;	/* physical address of gdr_base_register */
+	struct ce_sd *sdr;	/* scatter descriptor ring */
+	dma_addr_t sdr_pa;	/* physical address of sdr_base_register */
 	void *scatter_buffer_va;
 	dma_addr_t scatter_buffer_pa;
-	u32 scatter_buffer_size;

-	void *shadow_sa_pool;		/* pool of memory for sa in pd_uinfo */
+	union shadow_sa_buf *shadow_sa_pool;
 	dma_addr_t shadow_sa_pool_pa;
-	void *shadow_sr_pool;		/* pool of memory for sr in pd_uinfo */
+	struct sa_state_record *shadow_sr_pool;
 	dma_addr_t shadow_sr_pool_pa;
 	u32 pdr_tail;
 	u32 pdr_head;
@@ -100,9 +105,10 @@ struct crypto4xx_device {
 	u32 gdr_head;
 	u32 sdr_tail;
 	u32 sdr_head;
-	void *pdr_uinfo;
+	struct pd_uinfo *pdr_uinfo;
 	struct list_head alg_list;	/* List of algorithm supported
 					by this device */
+	struct ratelimit_state aead_ratelimit;
 };

 struct crypto4xx_core_device {
@@ -118,30 +124,13 @@ struct crypto4xx_core_device {
 struct crypto4xx_ctx {
 	struct crypto4xx_device *dev;
-	void *sa_in;
-	dma_addr_t sa_in_dma_addr;
-	void *sa_out;
-	dma_addr_t sa_out_dma_addr;
-	void *state_record;
-	dma_addr_t state_record_dma_addr;
+	struct dynamic_sa_ctl *sa_in;
+	struct dynamic_sa_ctl *sa_out;
+	__le32 iv_nonce;
 	u32 sa_len;
-	u32 offset_to_sr_ptr;		/* offset to state ptr, in dynamic sa */
-	u32 direction;
-	u32 next_hdr;
-	u32 save_iv;
-	u32 pd_ctl_len;
-	u32 pd_ctl;
-	u32 bypass;
-	u32 is_hash;
-	u32 hash_final;
-};
-
-struct crypto4xx_req_ctx {
-	struct crypto4xx_device *dev;	/* Device in which
-					operation to send to */
-	void *sa;
-	u32 sa_dma_addr;
-	u16 sa_len;
+	union {
+		struct crypto_aead *aead;
+	} sw_cipher;
 };

 struct crypto4xx_alg_common {
@@ -149,6 +138,7 @@ struct crypto4xx_alg_common {
 	union {
 		struct crypto_alg cipher;
 		struct ahash_alg hash;
+		struct aead_alg aead;
 	} u;
 };

@@ -158,43 +148,90 @@ struct crypto4xx_alg {
 	struct crypto4xx_device *dev;
 };

-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
-	struct crypto_alg *x)
-{
-	switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AHASH:
-		return container_of(__crypto_ahash_alg(x),
-				    struct crypto4xx_alg, alg.u.hash);
-	}
-
-	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
-}
-
-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
-				   struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_memcpy_le(unsigned int *dst,
-				const unsigned char *buf, int len);
-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
-			      struct crypto4xx_ctx *ctx,
-			      struct scatterlist *src,
-			      struct scatterlist *dst,
-			      unsigned int datalen,
-			      void *iv, u32 iv_len);
-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
-				    const u8 *key, unsigned int keylen);
-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-extern int crypto4xx_hash_digest(struct ahash_request *req);
-extern int crypto4xx_hash_final(struct ahash_request *req);
-extern int crypto4xx_hash_update(struct ahash_request *req);
-extern int crypto4xx_hash_init(struct ahash_request *req);
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+		       struct crypto4xx_ctx *ctx,
+		       struct scatterlist *src,
+		       struct scatterlist *dst,
+		       const unsigned int datalen,
+		       const __le32 *iv, const u32 iv_len,
+		       const struct dynamic_sa_ctl *sa,
+		       const unsigned int sa_len,
+		       const unsigned int assoclen);
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt(struct ablkcipher_request *req);
+int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that is word aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+					   size_t len)
+{
+	for (; len >= 4; buf += 4, len -= 4)
+		*dst++ = __swab32p((u32 *) buf);
+
+	if (len) {
+		const u8 *tmp = (u8 *)buf;
+
+		switch (len) {
+		case 3:
+			*dst = (tmp[2] << 16) |
+			       (tmp[1] << 8) |
+			       tmp[0];
+			break;
+		case 2:
+			*dst = (tmp[1] << 8) |
+			       tmp[0];
+			break;
+		case 1:
+			*dst = tmp[0];
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+					      size_t len)
+{
+	crypto4xx_memcpy_swab32(dst, buf, len);
+}
+
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+					    size_t len)
+{
+	crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *ciper,
+			       unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);

 #endif
@@ -261,6 +261,9 @@ union ce_pd_ctl {
 	} bf;
 	u32 w;
 } __attribute__((packed));
+#define PD_CTL_HASH_FINAL	BIT(4)
+#define PD_CTL_PE_DONE		BIT(1)
+#define PD_CTL_HOST_READY	BIT(0)

 union ce_pd_ctl_len {
 	struct {
...
/**
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
* All rights reserved. James Hsiao <jhsiao@amcc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* @file crypto4xx_sa.c
*
* This file implements the security context
* associate format.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
{
u32 offset;
union dynamic_sa_contents cts;
if (ctx->direction == DIR_INBOUND)
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
else
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
offset = cts.bf.key_size
+ cts.bf.inner_size
+ cts.bf.outer_size
+ cts.bf.spi
+ cts.bf.seq_num0
+ cts.bf.seq_num1
+ cts.bf.seq_num_mask0
+ cts.bf.seq_num_mask1
+ cts.bf.seq_num_mask2
+ cts.bf.seq_num_mask3
+ cts.bf.iv0
+ cts.bf.iv1
+ cts.bf.iv2
+ cts.bf.iv3;
return sizeof(struct dynamic_sa_ctl) + offset * 4;
}
u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
{
union dynamic_sa_contents cts;
if (ctx->direction == DIR_INBOUND)
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
else
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
}
u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
{
union dynamic_sa_contents cts;
if (ctx->direction == DIR_INBOUND)
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
else
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
return sizeof(struct dynamic_sa_ctl);
}
@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
 static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
 			      struct crypto_platform_data *pdata)
 {
-	int err = -ENOMEM;
 	dma_cap_mask_t mask_in;

 	/* Try to grab DMA channel */
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
 		atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
 	if (!dd->dma_lch_in.chan) {
 		dev_warn(dd->dev, "no DMA channel available\n");
-		return err;
+		return -ENODEV;
 	}

 	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
 	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

-	sha_dd->irq = -1;
-
 	/* Get the base address */
 	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!sha_res) {
...
@@ -23,6 +23,7 @@
 #include <crypto/aes.h>
 #include <crypto/internal/hash.h>
 #include <crypto/aead.h>
+#include <crypto/gcm.h>
 #include <crypto/sha.h>
 #include <crypto/sha3.h>
@@ -39,8 +40,6 @@
 #define ARC4_STATE_SIZE     4

 #define CCM_AES_IV_SIZE     16
-#define GCM_AES_IV_SIZE     12
-#define GCM_ESP_IV_SIZE     8
 #define CCM_ESP_IV_SIZE     8
 #define RFC4543_ICV_SIZE    16
...