Commit 26b265cd authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 - Made x86 ablk_helper generic for ARM
 - Phase out chainiv in favour of eseqiv (affects IPsec)
 - Fixed aes-cbc IV corruption on s390
 - Added constant-time crypto_memneq which replaces memcmp
 - Fixed aes-ctr in omap-aes
 - Added OMAP3 ROM RNG support
 - Added PRNG support for MSM SoCs
 - Added a Job Ring API in caam and converted its drivers to use it
 - Misc fixes

[ NOTE! This pull request was sent within the merge window, but Herbert
  has some questionable email sending setup that makes him public enemy
  #1 as far as gmail is concerned.  So most of his emails seem to be
  trapped by gmail as spam, resulting in me not seeing them.  - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (49 commits)
  crypto: s390 - Fix aes-cbc IV corruption
  crypto: omap-aes - Fix CTR mode counter length
  crypto: omap-sham - Add missing modalias
  padata: make the sequence counter an atomic_t
  crypto: caam - Modify the interface layers to use JR API's
  crypto: caam - Add API's to allocate/free Job Rings
  crypto: caam - Add Platform driver for Job Ring
  hwrng: msm - Add PRNG support for MSM SoC's
  ARM: DT: msm: Add Qualcomm's PRNG driver binding document
  crypto: skcipher - Use eseqiv even on UP machines
  crypto: talitos - Simplify key parsing
  crypto: picoxcell - Simplify and harden key parsing
  crypto: ixp4xx - Simplify and harden key parsing
  crypto: authencesn - Simplify key parsing
  crypto: authenc - Export key parsing helper function
  crypto: mv_cesa: remove deprecated IRQF_DISABLED
  hwrng: OMAP3 ROM Random Number Generator support
  crypto: sha256_ssse3 - also test for BMI2
  crypto: mv_cesa - Remove redundant of_match_ptr
  crypto: sahara - Remove redundant of_match_ptr
  ...
parents 2e7babfa f262f0f5
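
One change in this pull deserves a short illustration before the diffs: crypto_memneq (crypto/memneq.c below) replaces memcmp() in authentication-tag checks. memcmp() may return as soon as the first differing byte is found, so its running time tells an attacker how far a forged tag matched; crypto_memneq accumulates the XOR of every byte pair and reports only zero/non-zero. A minimal sketch of the call pattern the conversions in this pull adopt (verify_tag() is an illustrative wrapper, not from the diff):

#include <crypto/algapi.h>	/* declares crypto_memneq() */

/* Compare a computed authenticator against the received one in
 * constant time; only the zero/non-zero outcome is defined. */
static int verify_tag(const u8 *ihash, const u8 *ohash, unsigned int authsize)
{
	/* old, leaky form:  memcmp(ihash, ohash, authsize) ? -EBADMSG : 0 */
	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
}
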
Qualcomm MSM pseudo random number generator.

Required properties:
- compatible : should be "qcom,prng"
- reg : specifies base physical address and size of the registers map
- clocks : phandle to clock-controller plus clock-specifier pair
- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block

Example:

	rng@f9bff000 {
		compatible = "qcom,prng";
		reg = <0xf9bff000 0x200>;
		clocks = <&clock GCC_PRNG_AHB_CLK>;
		clock-names = "core";
	};
@@ -209,13 +209,3 @@ void __init tegra_init_fuse(void)
 		tegra_sku_id, tegra_cpu_process_id,
 		tegra_core_process_id);
 }
-
-unsigned long long tegra_chip_uid(void)
-{
-	unsigned long long lo, hi;
-
-	lo = tegra_fuse_readl(FUSE_UID_LOW);
-	hi = tegra_fuse_readl(FUSE_UID_HIGH);
-	return (hi << 32ull) | lo;
-}
-EXPORT_SYMBOL(tegra_chip_uid);
...
@@ -35,7 +35,6 @@ static u8 *ctrblk;
 static char keylen_flag;

 struct s390_aes_ctx {
-	u8 iv[AES_BLOCK_SIZE];
 	u8 key[AES_MAX_KEY_SIZE];
 	long enc;
 	long dec;
@@ -441,30 +440,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }

-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct blkcipher_walk *walk)
 {
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[AES_BLOCK_SIZE];
+		u8 key[AES_MAX_KEY_SIZE];
+	} param;

 	if (!nbytes)
 		goto out;

-	memcpy(param, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.key, sctx->key, sctx->key_len);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;

-		ret = crypt_s390_kmc(func, param, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;

 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, param, AES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

 out:
 	return ret;
@@ -481,7 +486,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		return fallback_blk_enc(desc, dst, src, nbytes);

 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+	return cbc_aes_crypt(desc, sctx->enc, &walk);
 }

 static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +500,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 		return fallback_blk_dec(desc, dst, src, nbytes);

 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+	return cbc_aes_crypt(desc, sctx->dec, &walk);
 }

 static struct crypto_alg cbc_aes_alg = {
...
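A note on why the s390 hunks above are a fix and not a cleanup: the old code kept the CBC chaining value in the per-tfm s390_aes_ctx, so two requests running concurrently on the same tfm would overwrite each other's IV. The KMC instruction consumes a single parameter block holding the chaining value followed by the key; building that block on the stack gives every request a private copy. A minimal sketch of the layout, mirroring the anonymous struct in the diff (the struct name here is illustrative):

/* Parameter block handed to crypt_s390_kmc() for AES-CBC, field order
 * as in the hunk above.  KMC updates iv[] in place as it chains through
 * the blocks, so it must not live in shared per-tfm state. */
struct aes_cbc_kmc_param {
	u8 iv[AES_BLOCK_SIZE];		/* chaining value, updated by KMC */
	u8 key[AES_MAX_KEY_SIZE];	/* copied from the tfm key per request */
};
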
@@ -3,8 +3,9 @@
 #

 avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+			$(comma)4)$(comma)%ymm2,yes,no)

-obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o

 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
...
@@ -34,7 +34,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/i387.h>
 #include <asm/crypto/aes.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <linux/workqueue.h>
...
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
@@ -21,7 +22,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
...
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
@@ -21,7 +22,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
...
@@ -26,13 +26,13 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/cast5.h>
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <asm/xcr.h>
 #include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 #define CAST5_PARALLEL_BLOCKS 16
...
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/cast6.h>
 #include <crypto/cryptd.h>
@@ -37,7 +38,6 @@
 #include <crypto/xts.h>
 #include <asm/xcr.h>
 #include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 #define CAST6_PARALLEL_BLOCKS 8
...
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
@@ -22,7 +23,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 #define SERPENT_AVX2_PARALLEL_BLOCKS 16
...
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/serpent.h>
 #include <crypto/cryptd.h>
@@ -38,7 +39,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 /* 8-way parallel cipher functions */
...
@@ -34,6 +34,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/serpent.h>
 #include <crypto/cryptd.h>
@@ -42,7 +43,6 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/crypto/serpent-sse2.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>

 static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
...
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
 	/* allow AVX to override SSSE3, it's a little faster */
 	if (avx_usable()) {
 #ifdef CONFIG_AS_AVX2
-		if (boot_cpu_has(X86_FEATURE_AVX2))
+		if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
 			sha256_transform_asm = sha256_transform_rorx;
 		else
 #endif
@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");

 MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS("sha224");
...
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/twofish.h>
 #include <crypto/cryptd.h>
@@ -39,7 +40,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/twofish.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 #include <crypto/scatterwalk.h>
 #include <linux/workqueue.h>
...
#include <asm/i387.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
* instructions or access the SIMD register file
*/
static __must_check inline bool may_use_simd(void)
{
return irq_fpu_usable();
}
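
The new asm/simd.h wrapper above exists so that generic crypto code can ask "may I touch the vector registers here?" without naming x86's irq_fpu_usable() directly; an ARM port can supply its own answer. A hedged sketch of the guard-and-defer pattern it enables — the helpers other than may_use_simd() are illustrative placeholders, not from this commit (the real round trip lives in crypto/ablk_helper.c later in this diff):

#include <asm/simd.h>

/* Illustrative only: do_simd_encrypt()/defer_to_cryptd() stand in for
 * the real SIMD fast path and the cryptd fallback. */
static int encrypt_with_fallback(struct ablkcipher_request *req)
{
	if (!may_use_simd())			/* e.g. called from IRQ context */
		return defer_to_cryptd(req);	/* async, runs in process context */

	return do_simd_encrypt(req);		/* FPU/vector state is usable */
}
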
@@ -174,9 +174,8 @@ config CRYPTO_TEST
 	help
 	  Quick & dirty crypto test module.

-config CRYPTO_ABLK_HELPER_X86
+config CRYPTO_ABLK_HELPER
 	tristate
-	depends on X86
 	select CRYPTO_CRYPTD

 config CRYPTO_GLUE_HELPER_X86
@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
 	select CRYPTO_AES_X86_64 if 64BIT
 	select CRYPTO_AES_586 if !64BIT
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_ALGAPI
 	select CRYPTO_GLUE_HELPER_X86 if 64BIT
 	select CRYPTO_LRW
@@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
 	depends on CRYPTO
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_CAMELLIA_X86_64
 	select CRYPTO_LRW
@@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
 	depends on CRYPTO
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_CAMELLIA_X86_64
 	select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_CAST_COMMON
 	select CRYPTO_CAST5
 	help
@@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_CAST_COMMON
 	select CRYPTO_CAST6
@@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_SERPENT
 	select CRYPTO_LRW
@@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
 	depends on X86 && !64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_SERPENT
 	select CRYPTO_LRW
@@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_SERPENT
 	select CRYPTO_LRW
@@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_SERPENT
 	select CRYPTO_SERPENT_AVX_X86_64
@@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER_X86
+	select CRYPTO_ABLK_HELPER
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_TWOFISH_COMMON
 	select CRYPTO_TWOFISH_X86_64
...
@@ -2,8 +2,13 @@
 # Cryptographic API
 #

+# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
+# that will defeat memneq's actual purpose to prevent timing attacks.
+CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
+CFLAGS_memneq.o := -Os
+
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o memneq.o

 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -105,3 +110,4 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
 obj-$(CONFIG_ASYNC_CORE) += async_tx/
 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
 obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
+obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
...
@@ -28,10 +28,11 @@
 #include <linux/crypto.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/hardirq.h>
 #include <crypto/algapi.h>
 #include <crypto/cryptd.h>
-#include <asm/i387.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
+#include <asm/simd.h>

 int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 		 unsigned int key_len)
@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

-	if (!irq_fpu_usable()) {
+	if (!may_use_simd()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);

-		memcpy(cryptd_req, req, sizeof(*req));
+		*cryptd_req = *req;
 		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

 		return crypto_ablkcipher_encrypt(cryptd_req);
@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

-	if (!irq_fpu_usable()) {
+	if (!may_use_simd()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);

-		memcpy(cryptd_req, req, sizeof(*req));
+		*cryptd_req = *req;
 		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

 		return crypto_ablkcipher_decrypt(cryptd_req);
...
@@ -16,9 +16,7 @@
 #include <crypto/internal/skcipher.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/rtnetlink.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -30,8 +28,6 @@
 #include "internal.h"

-static const char *skcipher_default_geniv __read_mostly;
-
 struct ablkcipher_buffer {
 	struct list_head entry;
 	struct scatter_walk dst;
@@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg)
 	    alg->cra_blocksize)
 		return "chainiv";

-	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
-	       "eseqiv" : skcipher_default_geniv;
+	return "eseqiv";
 }

 static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -709,17 +704,3 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
-
-static int __init skcipher_module_init(void)
-{
-	skcipher_default_geniv = num_possible_cpus() > 1 ?
-				 "eseqiv" : "chainiv";
-	return 0;
-}
-
-static void skcipher_module_exit(void)
-{
-}
-
-module_init(skcipher_module_init);
-module_exit(skcipher_module_exit);
...
@@ -230,11 +230,11 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
 	 */
 	if (byte_count < DEFAULT_BLK_SZ) {
 empty_rbuf:
-		for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
-			ctx->rand_data_valid++) {
+		while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
 			*ptr = ctx->rand_data[ctx->rand_data_valid];
 			ptr++;
 			byte_count--;
+			ctx->rand_data_valid++;
 			if (byte_count == 0)
 				goto done;
 		}
...
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <crypto/algapi.h>
 #include "public_key.h"

 MODULE_LICENSE("GPL");
@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
 		}
 	}

-	if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) {
+	if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
 		kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
 		return -EBADMSG;
 	}

-	if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) {
+	if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
 		kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
 		return -EKEYREJECTED;
 	}
...
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
 		aead_request_complete(req, err);
 }

-static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
-				 unsigned int keylen)
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+			       unsigned int keylen)
 {
-	unsigned int authkeylen;
-	unsigned int enckeylen;
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct crypto_ahash *auth = ctx->auth;
-	struct crypto_ablkcipher *enc = ctx->enc;
-	struct rtattr *rta = (void *)key;
+	struct rtattr *rta = (struct rtattr *)key;
 	struct crypto_authenc_key_param *param;
-	int err = -EINVAL;

 	if (!RTA_OK(rta, keylen))
-		goto badkey;
+		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
+		return -EINVAL;
 	if (RTA_PAYLOAD(rta) < sizeof(*param))
-		goto badkey;
+		return -EINVAL;

 	param = RTA_DATA(rta);
-	enckeylen = be32_to_cpu(param->enckeylen);
+	keys->enckeylen = be32_to_cpu(param->enckeylen);

 	key += RTA_ALIGN(rta->rta_len);
 	keylen -= RTA_ALIGN(rta->rta_len);

-	if (keylen < enckeylen)
-		goto badkey;
+	if (keylen < keys->enckeylen)
+		return -EINVAL;

-	authkeylen = keylen - enckeylen;
+	keys->authkeylen = keylen - keys->enckeylen;
+	keys->authkey = key;
+	keys->enckey = key + keys->authkeylen;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
+
+static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+				 unsigned int keylen)
+{
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct crypto_ahash *auth = ctx->auth;
+	struct crypto_ablkcipher *enc = ctx->enc;
+	struct crypto_authenc_keys keys;
+	int err = -EINVAL;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;

 	crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
 	crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
 				     CRYPTO_TFM_REQ_MASK);
-	err = crypto_ahash_setkey(auth, key, authkeylen);
+	err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
 	crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
 				       CRYPTO_TFM_RES_MASK);
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
 	crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
 					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+	err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
 	crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
 				       CRYPTO_TFM_RES_MASK);
@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);

-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);

-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
@@ -462,7 +474,7 @@ static int crypto_authenc_verify(struct aead_request *req,
 	ihash = ohash + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
-	return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
 }

 static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
...
@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
 static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
 				     unsigned int keylen)
 {
-	unsigned int authkeylen;
-	unsigned int enckeylen;
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct crypto_ahash *auth = ctx->auth;
 	struct crypto_ablkcipher *enc = ctx->enc;
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
+	struct crypto_authenc_keys keys;
 	int err = -EINVAL;

-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
-		goto badkey;
-
-	param = RTA_DATA(rta);
-	enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < enckeylen)
-		goto badkey;
-
-	authkeylen = keylen - enckeylen;
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;

 	crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
 	crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
 				     CRYPTO_TFM_REQ_MASK);
-	err = crypto_ahash_setkey(auth, key, authkeylen);
+	err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
 	crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
 					   CRYPTO_TFM_RES_MASK);
@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
 	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
 	crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
 					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+	err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
 	crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
 					   CRYPTO_TFM_RES_MASK);
@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);

-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);

-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);

-	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
 	if (err)
 		goto out;
@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
 	ihash = ohash + authsize;
 	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
 				 authsize, 0);
-	return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
 }

 static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
...
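For reference, the key blob that crypto_authenc_extractkeys() (exported in the authenc.c hunks above) takes apart is the one callers such as IPsec assemble: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian cipher key length, followed by the authentication key and then the encryption key. A hedged sketch of building such a blob; build_authenc_key() is an illustrative helper, not part of this commit:

#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <crypto/authenc.h>

/* Packs authkey || enckey behind the rtattr header that
 * crypto_authenc_extractkeys() expects; returns the blob length.
 * keybuf must have room for RTA_SPACE(sizeof(param)) + both keys. */
static unsigned int build_authenc_key(u8 *keybuf,
				      const u8 *authkey, unsigned int authkeylen,
				      const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)keybuf;
	struct crypto_authenc_key_param *param = RTA_DATA(rta);

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(keybuf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(keybuf + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}
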
@@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
 	if (!err) {
 		err = crypto_ccm_auth(req, req->dst, cryptlen);
-		if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
+		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
 			err = -EBADMSG;
 	}
 	aead_request_complete(req, err);
@@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 		return err;

 	/* verify */
-	if (memcmp(authtag, odata, authsize))
+	if (crypto_memneq(authtag, odata, authsize))
 		return -EBADMSG;

 	return err;
...
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
 	crypto_xor(auth_tag, iauth_tag, 16);
 	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
-	return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
+	return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
 }

 static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
...
/*
* Constant-time equality testing of memory regions.
*
* Authors:
*
* James Yonan <james@openvpn.net>
* Daniel Borkmann <dborkman@redhat.com>
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of OpenVPN Technologies nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <crypto/algapi.h>
#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
/* Generic path for arbitrary size */
static inline unsigned long
__crypto_memneq_generic(const void *a, const void *b, size_t size)
{
unsigned long neq = 0;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
while (size >= sizeof(unsigned long)) {
neq |= *(unsigned long *)a ^ *(unsigned long *)b;
a += sizeof(unsigned long);
b += sizeof(unsigned long);
size -= sizeof(unsigned long);
}
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
while (size > 0) {
neq |= *(unsigned char *)a ^ *(unsigned char *)b;
a += 1;
b += 1;
size -= 1;
}
return neq;
}
/* Loop-free fast-path for frequently used 16-byte size */
static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (sizeof(unsigned long) == 8)
return ((*(unsigned long *)(a) ^ *(unsigned long *)(b))
| (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
else if (sizeof(unsigned int) == 4)
return ((*(unsigned int *)(a) ^ *(unsigned int *)(b))
| (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4))
| (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8))
| (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
else
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
return ((*(unsigned char *)(a) ^ *(unsigned char *)(b))
| (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1))
| (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2))
| (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3))
| (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4))
| (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5))
| (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6))
| (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7))
| (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8))
| (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9))
| (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
| (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
| (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
| (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
| (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
| (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
}
/* Compare two areas of memory without leaking timing information,
* and with special optimizations for common sizes. Users should
* not call this function directly, but should instead use
* crypto_memneq defined in crypto/algapi.h.
*/
noinline unsigned long __crypto_memneq(const void *a, const void *b,
size_t size)
{
switch (size) {
case 16:
return __crypto_memneq_16(a, b);
default:
return __crypto_memneq_generic(a, b, size);
}
}
EXPORT_SYMBOL(__crypto_memneq);
#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
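
One design note on the file above: the constant-time property lives partly in the build system. crypto/Makefile (earlier in this diff) forces -Os for memneq.o because at higher optimization levels a compiler may notice that `neq` can only grow once non-zero and insert an early exit, reintroducing the timing leak. A hedged illustration of the transformation being defended against; this function is not kernel code, it shows what a miscompiled __crypto_memneq_generic() would amount to:

/* Compiler-rewritten form: the inserted short circuit makes the running
 * time depend on the position of the first difference -- exactly the
 * behaviour of memcmp() that crypto_memneq exists to avoid. */
static unsigned long memneq_as_miscompiled(const unsigned char *a,
					   const unsigned char *b, size_t size)
{
	unsigned long neq = 0;

	while (size-- > 0) {
		neq |= *a++ ^ *b++;
		if (neq)		/* inserted early return */
			return neq;
	}
	return neq;
}
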
@@ -165,6 +165,19 @@ config HW_RANDOM_OMAP
 	  If unsure, say Y.

+config HW_RANDOM_OMAP3_ROM
+	tristate "OMAP3 ROM Random Number Generator support"
+	depends on HW_RANDOM && ARCH_OMAP3
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on OMAP34xx processors.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called omap3-rom-rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_OCTEON
 	tristate "Octeon Random Number Generator support"
 	depends on HW_RANDOM && CAVIUM_OCTEON_SOC
@@ -327,3 +340,15 @@ config HW_RANDOM_TPM
 	  module will be called tpm-rng.

 	  If unsure, say Y.
+
+config HW_RANDOM_MSM
+	tristate "Qualcomm MSM Random Number Generator support"
+	depends on HW_RANDOM && ARCH_MSM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Qualcomm MSM SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm-rng.
+
+	  If unsure, say Y.
...
@@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o
 obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
 obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
 obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
 obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
 obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
@@ -28,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
...
/*
* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/* Device specific register offsets */
#define PRNG_DATA_OUT 0x0000
#define PRNG_STATUS 0x0004
#define PRNG_LFSR_CFG 0x0100
#define PRNG_CONFIG 0x0104
/* Device specific register masks and config values */
#define PRNG_LFSR_CFG_MASK 0x0000ffff
#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
#define PRNG_CONFIG_HW_ENABLE BIT(1)
#define PRNG_STATUS_DATA_AVAIL BIT(0)
#define MAX_HW_FIFO_DEPTH 16
#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
#define WORD_SZ 4
struct msm_rng {
void __iomem *base;
struct clk *clk;
struct hwrng hwrng;
};
#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
static int msm_rng_enable(struct hwrng *hwrng, int enable)
{
struct msm_rng *rng = to_msm_rng(hwrng);
u32 val;
int ret;
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
if (enable) {
/* Enable PRNG only if it is not already enabled */
val = readl_relaxed(rng->base + PRNG_CONFIG);
if (val & PRNG_CONFIG_HW_ENABLE)
goto already_enabled;
val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
val &= ~PRNG_LFSR_CFG_MASK;
val |= PRNG_LFSR_CFG_CLOCKS;
writel(val, rng->base + PRNG_LFSR_CFG);
val = readl_relaxed(rng->base + PRNG_CONFIG);
val |= PRNG_CONFIG_HW_ENABLE;
writel(val, rng->base + PRNG_CONFIG);
} else {
val = readl_relaxed(rng->base + PRNG_CONFIG);
val &= ~PRNG_CONFIG_HW_ENABLE;
writel(val, rng->base + PRNG_CONFIG);
}
already_enabled:
clk_disable_unprepare(rng->clk);
return 0;
}
static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
{
struct msm_rng *rng = to_msm_rng(hwrng);
size_t currsize = 0;
u32 *retdata = data;
size_t maxsize;
int ret;
u32 val;
/* calculate max size bytes to transfer back to caller */
maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
/* no room for word data */
if (maxsize < WORD_SZ)
return 0;
ret = clk_prepare_enable(rng->clk);
if (ret)
return ret;
/* read random data from hardware */
do {
val = readl_relaxed(rng->base + PRNG_STATUS);
if (!(val & PRNG_STATUS_DATA_AVAIL))
break;
val = readl_relaxed(rng->base + PRNG_DATA_OUT);
if (!val)
break;
*retdata++ = val;
currsize += WORD_SZ;
/* make sure we stay on 32bit boundary */
if ((maxsize - currsize) < WORD_SZ)
break;
} while (currsize < maxsize);
clk_disable_unprepare(rng->clk);
return currsize;
}
static int msm_rng_init(struct hwrng *hwrng)
{
return msm_rng_enable(hwrng, 1);
}
static void msm_rng_cleanup(struct hwrng *hwrng)
{
msm_rng_enable(hwrng, 0);
}
static int msm_rng_probe(struct platform_device *pdev)
{
struct resource *res;
struct msm_rng *rng;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
platform_set_drvdata(pdev, rng);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rng->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
rng->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(rng->clk))
return PTR_ERR(rng->clk);
rng->hwrng.name = KBUILD_MODNAME,
rng->hwrng.init = msm_rng_init,
rng->hwrng.cleanup = msm_rng_cleanup,
rng->hwrng.read = msm_rng_read,
ret = hwrng_register(&rng->hwrng);
if (ret) {
dev_err(&pdev->dev, "failed to register hwrng\n");
return ret;
}
return 0;
}
static int msm_rng_remove(struct platform_device *pdev)
{
struct msm_rng *rng = platform_get_drvdata(pdev);
hwrng_unregister(&rng->hwrng);
return 0;
}
static const struct of_device_id msm_rng_of_match[] = {
{ .compatible = "qcom,prng", },
{}
};
MODULE_DEVICE_TABLE(of, msm_rng_of_match);
static struct platform_driver msm_rng_driver = {
.probe = msm_rng_probe,
.remove = msm_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(msm_rng_of_match),
}
};
module_platform_driver(msm_rng_driver);
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
MODULE_LICENSE("GPL v2");
/*
* omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
*
* Copyright (C) 2009 Nokia Corporation
* Author: Juha Yrjola <juha.yrjola@solidboot.com>
*
* Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/hw_random.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#define RNG_RESET 0x01
#define RNG_GEN_PRNG_HW_INIT 0x02
#define RNG_GEN_HW 0x08
/* param1: ptr, param2: count, param3: flag */
static u32 (*omap3_rom_rng_call)(u32, u32, u32);
static struct timer_list idle_timer;
static int rng_idle;
static struct clk *rng_clk;
static void omap3_rom_rng_idle(unsigned long data)
{
int r;
r = omap3_rom_rng_call(0, 0, RNG_RESET);
if (r != 0) {
pr_err("reset failed: %d\n", r);
return;
}
clk_disable_unprepare(rng_clk);
rng_idle = 1;
}
static int omap3_rom_rng_get_random(void *buf, unsigned int count)
{
u32 r;
u32 ptr;
del_timer_sync(&idle_timer);
if (rng_idle) {
clk_prepare_enable(rng_clk);
r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
if (r != 0) {
clk_disable_unprepare(rng_clk);
pr_err("HW init failed: %d\n", r);
return -EIO;
}
rng_idle = 0;
}
ptr = virt_to_phys(buf);
r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
if (r != 0)
return -EINVAL;
return 0;
}
static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
{
return 1;
}
static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
{
int r;
r = omap3_rom_rng_get_random(data, 4);
if (r < 0)
return r;
return 4;
}
static struct hwrng omap3_rom_rng_ops = {
.name = "omap3-rom",
.data_present = omap3_rom_rng_data_present,
.data_read = omap3_rom_rng_data_read,
};
static int omap3_rom_rng_probe(struct platform_device *pdev)
{
pr_info("initializing\n");
omap3_rom_rng_call = pdev->dev.platform_data;
if (!omap3_rom_rng_call) {
pr_err("omap3_rom_rng_call is NULL\n");
return -EINVAL;
}
setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
rng_clk = clk_get(&pdev->dev, "ick");
if (IS_ERR(rng_clk)) {
pr_err("unable to get RNG clock\n");
return PTR_ERR(rng_clk);
}
/* Leave the RNG in reset state. */
clk_prepare_enable(rng_clk);
omap3_rom_rng_idle(0);
return hwrng_register(&omap3_rom_rng_ops);
}
static int omap3_rom_rng_remove(struct platform_device *pdev)
{
hwrng_unregister(&omap3_rom_rng_ops);
clk_disable_unprepare(rng_clk);
clk_put(rng_clk);
return 0;
}
static struct platform_driver omap3_rom_rng_driver = {
.driver = {
.name = "omap3-rom-rng",
.owner = THIS_MODULE,
},
.probe = omap3_rom_rng_probe,
.remove = omap3_rom_rng_remove,
};
module_platform_driver(omap3_rom_rng_driver);
MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_LICENSE("GPL");
@@ -24,7 +24,6 @@
 #include <linux/hw_random.h>
 #include <asm/vio.h>

-#define MODULE_NAME "pseries-rng"

 static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
 {
@@ -55,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 };

 static struct hwrng pseries_rng = {
-	.name		= MODULE_NAME,
+	.name		= KBUILD_MODNAME,
 	.data_read	= pseries_rng_data_read,
 };
@@ -78,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = {
 MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);

 static struct vio_driver pseries_rng_driver = {
-	.name = MODULE_NAME,
+	.name = KBUILD_MODNAME,
 	.probe = pseries_rng_probe,
 	.remove = pseries_rng_remove,
 	.get_desired_dma = pseries_rng_get_desired_dma,
...
@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
 module_init(mod_init);
 module_exit(mod_exit);

-static struct x86_cpu_id via_rng_cpu_id[] = {
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
 	{}
 };
...
@@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM
 	help
 	  Enables the driver module for Freescale's Cryptographic Accelerator
 	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
-	  This module adds a job ring operation interface, and configures h/w
+	  This module creates job ring devices, and configures h/w
 	  to operate as a DPAA component automatically, depending
 	  on h/w feature availability.

 	  To compile this driver as a module, choose M here: the module
 	  will be called caam.

+config CRYPTO_DEV_FSL_CAAM_JR
+	tristate "Freescale CAAM Job Ring driver backend"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	help
+	  Enables the driver module for Job Rings which are part of
+	  Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM). This module adds a job ring operation
+	  interface.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam_jr.
+
 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
 	int "Job Ring size"
-	depends on CRYPTO_DEV_FSL_CAAM
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	range 2 9
 	default "9"
 	help
@@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
 config CRYPTO_DEV_FSL_CAAM_INTC
 	bool "Job Ring interrupt coalescing"
-	depends on CRYPTO_DEV_FSL_CAAM
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default n
 	help
 	  Enable the Job Ring's interrupt coalescing feature.
@@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	tristate "Register algorithm implementations with the Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM
+	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_ALGAPI
 	select CRYPTO_AUTHENC
@@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 config CRYPTO_DEV_FSL_CAAM_AHASH_API
 	tristate "Register hash algorithm implementations with Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM
+	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_HASH
 	help
@@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
 config CRYPTO_DEV_FSL_CAAM_RNG_API
 	tristate "Register caam device for hwrng API"
-	depends on CRYPTO_DEV_FSL_CAAM
+	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_RNG
 	select HW_RANDOM
...
@@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
 endif

 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o

-caam-objs := ctrl.o jr.o error.o key_gen.o
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
@@ -86,6 +86,7 @@
 #else
 #define debug(format, arg...)
 #endif
+static struct list_head alg_list;

 /* Set DK bit in class 1 operation if shared */
 static inline void append_dec_op1(u32 *desc, u32 type)
@@ -2057,7 +2058,6 @@ static struct caam_alg_template driver_algs[] = {
 struct caam_crypto_alg {
 	struct list_head entry;
-	struct device *ctrldev;
 	int class1_alg_type;
 	int class2_alg_type;
 	int alg_op;
@@ -2070,14 +2070,12 @@ static int caam_cra_init(struct crypto_tfm *tfm)
 	struct caam_crypto_alg *caam_alg =
 		 container_of(alg, struct caam_crypto_alg, crypto_alg);
 	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
-	int tgt_jr = atomic_inc_return(&priv->tfm_count);

-	/*
-	 * distribute tfms across job rings to ensure in-order
-	 * crypto request processing per tfm
-	 */
-	ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}

 	/* copy descriptor header template value */
 	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2104,44 +2102,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
 				 desc_bytes(ctx->sh_desc_givenc),
 				 DMA_TO_DEVICE);
+
+	caam_jr_free(ctx->jrdev);
 }

 static void __exit caam_algapi_exit(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	struct caam_crypto_alg *t_alg, *n;

-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return;
-
-	ctrldev = &pdev->dev;
-	of_node_put(dev_node);
-	priv = dev_get_drvdata(ctrldev);
-
-	if (!priv->alg_list.next)
+	if (!alg_list.next)
 		return;

-	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
 		crypto_unregister_alg(&t_alg->crypto_alg);
 		list_del(&t_alg->entry);
 		kfree(t_alg);
 	}
 }

-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
-					      struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 					      *template)
 {
 	struct caam_crypto_alg *t_alg;
@@ -2149,7 +2129,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
 	if (!t_alg) {
-		dev_err(ctrldev, "failed to allocate t_alg\n");
+		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -2181,62 +2161,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
 	t_alg->class1_alg_type = template->class1_alg_type;
 	t_alg->class2_alg_type = template->class2_alg_type;
 	t_alg->alg_op = template->alg_op;
-	t_alg->ctrldev = ctrldev;

 	return t_alg;
 }

 static int __init caam_algapi_init(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	int i = 0, err = 0;

-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
-
-	INIT_LIST_HEAD(&priv->alg_list);
-	atomic_set(&priv->tfm_count, -1);
+	INIT_LIST_HEAD(&alg_list);

 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		/* TODO: check if h/w supports alg */
 		struct caam_crypto_alg *t_alg;

-		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+		t_alg = caam_alg_alloc(&driver_algs[i]);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			dev_warn(ctrldev, "%s alg allocation failed\n",
-				 driver_algs[i].driver_name);
+			pr_warn("%s alg allocation failed\n",
+				driver_algs[i].driver_name);
 			continue;
 		}

 		err = crypto_register_alg(&t_alg->crypto_alg);
 		if (err) {
-			dev_warn(ctrldev, "%s alg registration failed\n",
-				 t_alg->crypto_alg.cra_driver_name);
+			pr_warn("%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
 			kfree(t_alg);
 		} else
-			list_add_tail(&t_alg->entry, &priv->alg_list);
+			list_add_tail(&t_alg->entry, &alg_list);
 	}

-	if (!list_empty(&priv->alg_list))
-		dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
-			 (char *)of_get_property(dev_node, "compatible", NULL));
+	if (!list_empty(&alg_list))
+		pr_info("caam algorithms registered in /proc/crypto\n");

 	return err;
 }
...
@@ -94,6 +94,9 @@
 #define debug(format, arg...)
 #endif

+static struct list_head hash_list;
+
 /* ahash per-session context */
 struct caam_hash_ctx {
 	struct device *jrdev;
@@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = {
 struct caam_hash_alg {
 	struct list_head entry;
-	struct device *ctrldev;
 	int alg_type;
 	int alg_op;
 	struct ahash_alg ahash_alg;
@@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 	struct caam_hash_alg *caam_hash =
 		 container_of(alg, struct caam_hash_alg, ahash_alg);
 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
 	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
 	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
 					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
@@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
 					 HASH_MSG_LEN + 64,
 					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
-	int tgt_jr = atomic_inc_return(&priv->tfm_count);
 	int ret = 0;

 	/*
-	 * distribute tfms across job rings to ensure in-order
+	 * Get a Job ring from Job Ring driver to ensure in-order
 	 * crypto request processing per tfm
 	 */
-	ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}

 	/* copy descriptor header template value */
 	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
@@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
 				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+
+	caam_jr_free(ctx->jrdev);
 }

 static void __exit caam_algapi_hash_exit(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	struct caam_hash_alg *t_alg, *n;

-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
+	if (!hash_list.next)
 		return;

-	ctrldev = &pdev->dev;
-	of_node_put(dev_node);
-	priv = dev_get_drvdata(ctrldev);
-
-	if (!priv->hash_list.next)
-		return;
-
-	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
 		crypto_unregister_ahash(&t_alg->ahash_alg);
 		list_del(&t_alg->entry);
 		kfree(t_alg);
@@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void)
 }

 static struct caam_hash_alg *
-caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+caam_hash_alloc(struct caam_hash_template *template,
 		bool keyed)
 {
 	struct caam_hash_alg *t_alg;
@@ -1774,7 +1760,7 @@ caam_hash_alloc(struct caam_hash_template *template,
 	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
 	if (!t_alg) {
-		dev_err(ctrldev, "failed to allocate t_alg\n");
+		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1805,37 +1791,15 @@ caam_hash_alloc(struct caam_hash_template *template,
 	t_alg->alg_type = template->alg_type;
 	t_alg->alg_op = template->alg_op;
-	t_alg->ctrldev = ctrldev;

 	return t_alg;
 }

 static int __init caam_algapi_hash_init(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
 	int i = 0, err = 0;

-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
-
-	INIT_LIST_HEAD(&priv->hash_list);
-	atomic_set(&priv->tfm_count, -1);
+	INIT_LIST_HEAD(&hash_list);

 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
@@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void)
 		struct caam_hash_alg *t_alg;

 		/* register hmac version */
-		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+		t_alg = caam_hash_alloc(&driver_hash[i], true);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			dev_warn(ctrldev, "%s alg allocation failed\n",
-				 driver_hash[i].driver_name);
+			pr_warn("%s alg allocation failed\n",
+				driver_hash[i].driver_name);
 			continue;
 		}

 		err = crypto_register_ahash(&t_alg->ahash_alg);
 		if (err) {
-			dev_warn(ctrldev, "%s alg registration failed\n",
-				 t_alg->ahash_alg.halg.base.cra_driver_name);
+			pr_warn("%s alg registration failed\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name);
 			kfree(t_alg);
 		} else
-			list_add_tail(&t_alg->entry, &priv->hash_list);
+			list_add_tail(&t_alg->entry, &hash_list);

 		/* register unkeyed version */
-		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+		t_alg = caam_hash_alloc(&driver_hash[i], false);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			dev_warn(ctrldev, "%s alg allocation failed\n",
-				 driver_hash[i].driver_name);
+			pr_warn("%s alg allocation failed\n",
+				driver_hash[i].driver_name);
 			continue;
 		}

 		err = crypto_register_ahash(&t_alg->ahash_alg);
 		if (err) {
-			dev_warn(ctrldev, "%s alg registration failed\n",
-				 t_alg->ahash_alg.halg.base.cra_driver_name);
+			pr_warn("%s alg registration failed\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name);
 			kfree(t_alg);
 		} else
-			list_add_tail(&t_alg->entry, &priv->hash_list);
+			list_add_tail(&t_alg->entry, &hash_list);
 	}

 	return err;
...
@@ -273,34 +273,23 @@ static struct hwrng caam_rng = {

 static void __exit caam_rng_exit(void)
 {
+	caam_jr_free(rng_ctx.jrdev);
 	hwrng_unregister(&caam_rng);
 }

 static int __init caam_rng_init(void)
 {
-	struct device_node *dev_node;
-	struct platform_device *pdev;
-	struct device *ctrldev;
-	struct caam_drv_private *priv;
-
-	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-	if (!dev_node) {
-		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-		if (!dev_node)
-			return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(dev_node);
-	if (!pdev)
-		return -ENODEV;
-
-	ctrldev = &pdev->dev;
-	priv = dev_get_drvdata(ctrldev);
-	of_node_put(dev_node);
+	struct device *dev;

-	caam_init_rng(&rng_ctx, priv->jrdev[0]);
+	dev = caam_jr_alloc();
+	if (IS_ERR(dev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(dev);
+	}

-	dev_info(priv->jrdev[0], "registering rng-caam\n");
+	caam_init_rng(&rng_ctx, dev);
+
+	dev_info(dev, "registering rng-caam\n");
 	return hwrng_register(&caam_rng);
 }
...
[diff collapsed]
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {

 /* randomizer AAI set */
 #define OP_ALG_AAI_RNG		(0x00 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_NOZERO	(0x10 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_ODD	(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB	(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP	(0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0	(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1	(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS	(0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI	(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK	(0x100 << OP_ALG_AAI_SHIFT)

 /* hmac/smac AAI set */
 #define OP_ALG_AAI_HASH		(0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
 #define OP_ALG_AAI_GSM		(0x10 << OP_ALG_AAI_SHIFT)
 #define OP_ALG_AAI_EDGE		(0x20 << OP_ALG_AAI_SHIFT)

-/* RNG4 set */
-#define OP_ALG_RNG4_SHIFT	4
-#define OP_ALG_RNG4_MASK	(0x1f3 << OP_ALG_RNG4_SHIFT)
-
-#define OP_ALG_RNG4_SK		(0x100 << OP_ALG_RNG4_SHIFT)
-
 #define OP_ALG_AS_SHIFT		2
 #define OP_ALG_AS_MASK		(0x3 << OP_ALG_AS_SHIFT)
 #define OP_ALG_AS_UPDATE	(0 << OP_ALG_AS_SHIFT)
...
@@ -37,13 +37,16 @@ struct caam_jrentry_info {

 /* Private sub-storage for a single JobR */
 struct caam_drv_private_jr {
-	struct device *parentdev;	/* points back to controller dev */
-	struct platform_device *jr_pdev;/* points to platform device for JR */
+	struct list_head list_node;	/* Job Ring device list */
+	struct device *dev;
 	int ridx;
 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
 	struct tasklet_struct irqtask;
 	int irq;			/* One per queue */

+	/* Number of scatterlist crypt transforms active on the JobR */
+	atomic_t tfm_count ____cacheline_aligned;
+
 	/* Job ring info */
 	int ringsize;	/* Size of rings (assume input = output) */
 	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
@@ -63,7 +66,7 @@ struct caam_drv_private_jr {
 struct caam_drv_private {

 	struct device *dev;
-	struct device **jrdev; /* Alloc'ed array per sub-device */
+	struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
 	struct platform_device *pdev;

 	/* Physical-presence section */
@@ -80,12 +83,11 @@ struct caam_drv_private {
 	u8 qi_present;		/* Nonzero if QI present in device */
 	int secvio_irq;		/* Security violation interrupt number */

-	/* which jr allocated to scatterlist crypto */
-	atomic_t tfm_count ____cacheline_aligned;
-	/* list of registered crypto algorithms (mk generic context handle?) */
-	struct list_head alg_list;
-	/* list of registered hash algorithms (mk generic context handle?) */
-	struct list_head hash_list;
+#define RNG4_MAX_HANDLES 2
+	/* RNG4 block */
+	u32 rng4_sh_init;	/* This bitmap shows which of the State
+				   Handles of the RNG4 block are initialized
+				   by this driver */

 	/*
 	 * debugfs entries for developer view into driver/device
...
[diff collapsed]
@@ -8,12 +8,11 @@
 #define JR_H

 /* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
 int caam_jr_enqueue(struct device *dev, u32 *desc,
 		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
 				void *areq),
 		    void *areq);

-extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
-			 int ring);
-extern int caam_jr_shutdown(struct device *dev);
-
 #endif /* JR_H */
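
For readers following the interface change: a minimal, hypothetical consumer of the new Job Ring API might look like the sketch below. It assumes only the three entry points declared above (caam_jr_alloc, caam_jr_free, caam_jr_enqueue); the descriptor contents, the callback bookkeeping, and the my_* names are illustrative, not part of the patch.

	/* Hypothetical JR API consumer, for illustration only */
	static void my_done_cb(struct device *jrdev, u32 *desc, u32 status,
			       void *areq)
	{
		if (status)
			dev_err(jrdev, "job failed: status 0x%08x\n", status);
		/* complete(areq), unmap DMA buffers, etc. */
	}

	static int my_run_job(u32 *desc, void *areq)
	{
		struct device *jrdev;
		int ret;

		jrdev = caam_jr_alloc();	/* take a ring for this user */
		if (IS_ERR(jrdev))
			return PTR_ERR(jrdev);

		ret = caam_jr_enqueue(jrdev, desc, my_done_cb, areq);
		if (ret)
			caam_jr_free(jrdev);	/* give the ring back on error */
		return ret;
	}

Note how this mirrors what caam_cra_init()/caam_cra_exit() now do: the ring is held for the lifetime of the user, which preserves in-order completion per transform.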
@@ -245,7 +245,7 @@ struct rngtst {

 /* RNG4 TRNG test registers */
 struct rng4tst {
 #define RTMCTL_PRGM 0x00010000	/* 1 -> program mode, 0 -> run mode */
 	u32 rtmctl;		/* misc. control register */
 	u32 rtscmisc;		/* statistical check misc. register */
 	u32 rtpkrrng;		/* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
 	};
 #define RTSDCTL_ENT_DLY_SHIFT 16
 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MAX 12800
 	u32 rtsdctl;		/* seed control register */
 	union {
 		u32 rtsblim;	/* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
 		u32 rtfrqcnt;	/* PRGM=0: freq. count register */
 	};
 	u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
 #define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
 	u32 rdsta;
 	u32 rsvd2[15];
 };
@@ -692,6 +698,7 @@ struct caam_deco {
 	u32 jr_ctl_hi;	/* CxJRR - JobR Control Register      @800 */
 	u32 jr_ctl_lo;
 	u64 jr_descaddr;	/* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
 	u32 op_status_hi;	/* DxOPSTA - DECO Operation Status */
 	u32 op_status_lo;
 	u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
 	u32 rsvd29[48];
 	u32 descbuf[64];	/* DxDESB - Descriptor buffer */
 	u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR	0x00D00000
+#define DESC_DBG_DECO_STAT_VALID	0x80000000
+#define DESC_DBG_DECO_STAT_MASK	0x00F00000
 	u32 desc_dbg;		/* DxDDR - DECO Debug Register */
 	u32 rsvd31[126];
 };

-/* DECO DBG Register Valid Bit*/
-#define DECO_DBG_VALID		0x80000000
-
 #define DECO_JQCR_WHL		0x20000000
 #define DECO_JQCR_FOUR		0x10000000
...
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
 	return nents;
 }

+/* Map SG page in kernel virtual address space and copy */
+static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
+			       int len, int offset)
+{
+	u8 *mapped_addr;
+
+	/*
+	 * Page here can be user-space pinned using get_user_pages
+	 * Same must be kmapped before use and kunmapped subsequently
+	 */
+	mapped_addr = kmap_atomic(sg_page(sg));
+	memcpy(dest, mapped_addr + offset, len);
+	kunmap_atomic(mapped_addr);
+}
+
 /* Copy from len bytes of sg to dest, starting from beginning */
 static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
 {
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
 	int cpy_index = 0, next_cpy_index = current_sg->length;

 	while (next_cpy_index < len) {
-		memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
-		       current_sg->length);
+		sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
+			    current_sg->offset);
 		current_sg = scatterwalk_sg_next(current_sg);
 		cpy_index = next_cpy_index;
 		next_cpy_index += current_sg->length;
 	}
 	if (cpy_index < len)
-		memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
-		       len - cpy_index);
+		sg_map_copy(dest + cpy_index, current_sg, len - cpy_index,
+			    current_sg->offset);
 }

 /* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
 				int to_skip, unsigned int end)
 {
 	struct scatterlist *current_sg = sg;
-	int sg_index, cpy_index;
+	int sg_index, cpy_index, offset;

 	sg_index = current_sg->length;
 	while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
 		sg_index += current_sg->length;
 	}
 	cpy_index = sg_index - to_skip;
-	memcpy(dest, (u8 *) sg_virt(current_sg) +
-	       current_sg->length - cpy_index, cpy_index);
-	current_sg = scatterwalk_sg_next(current_sg);
-	if (end - sg_index)
+	offset = current_sg->offset + current_sg->length - cpy_index;
+	sg_map_copy(dest, current_sg, cpy_index, offset);
+	if (end - sg_index) {
+		current_sg = scatterwalk_sg_next(current_sg);
 		sg_copy(dest + cpy_index, current_sg, end - sg_index);
+	}
 }
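
A note on why the memcpy()-over-sg_virt() calls had to go: sg_virt() amounts to page_address(sg_page(sg)) + sg->offset, which is only valid for lowmem pages; entries that reference highmem or user pages pinned with get_user_pages() have no permanent kernel mapping. The sketch below is illustrative only (my_copy_one_sg_entry is not part of the patch) and just restates the kmap_atomic() pattern that sg_map_copy() now applies per entry:

	/*
	 * Illustrative sketch: copy a single S/G entry safely.
	 * kmap_atomic() creates a temporary per-CPU mapping, so the
	 * code must not sleep between map and unmap.
	 */
	static inline void my_copy_one_sg_entry(u8 *dest, struct scatterlist *sg)
	{
		u8 *va = kmap_atomic(sg_page(sg));	/* no sleeping until unmap */

		memcpy(dest, va + sg->offset, sg->length);
		kunmap_atomic(va);
	}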
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, dev);

 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r) {
-		dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
-		return -ENXIO;
-	}
-	dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
-					  resource_size(r));
+	dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(dev->dcp_regs_base))
+		return PTR_ERR(dev->dcp_regs_base);

 	dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
 	udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
 		return -EIO;
 	}
 	dev->dcp_vmi_irq = r->start;
-	ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
+	ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
+			       "dcp", dev);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "can't request_irq (0)\n");
 		return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
 	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
 	if (!r) {
 		dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
-		ret = -EIO;
-		goto err_free_irq0;
+		return -EIO;
 	}
 	dev->dcp_irq = r->start;
-	ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
+	ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
+			       dev);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "can't request_irq (1)\n");
-		ret = -EIO;
-		goto err_free_irq0;
+		return -EIO;
 	}

 	dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
 					    GFP_KERNEL);
 	if (!dev->hw_pkg[0]) {
 		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
-		ret = -ENOMEM;
-		goto err_free_irq1;
+		return -ENOMEM;
 	}

 	for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ static int dcp_probe(struct platform_device *pdev)
 	for (j = 0; j < i; j++)
 		crypto_unregister_alg(&algs[j]);
 err_free_key_iv:
+	tasklet_kill(&dev->done_task);
+	tasklet_kill(&dev->queue_task);
 	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
 			  dev->payload_base_dma);
 err_free_hw_packet:
 	dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
 			  sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
 			  dev->hw_phys_pkg);
-err_free_irq1:
-	free_irq(dev->dcp_irq, dev);
-err_free_irq0:
-	free_irq(dev->dcp_vmi_irq, dev);

 	return ret;
 }
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
 	int j;
 	dev = platform_get_drvdata(pdev);

-	dma_free_coherent(&pdev->dev,
-			  DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
-			  dev->hw_pkg[0], dev->hw_phys_pkg);
-
-	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
-			  dev->payload_base_dma);
-
-	free_irq(dev->dcp_irq, dev);
-	free_irq(dev->dcp_vmi_irq, dev);
+	misc_deregister(&dev->dcp_bootstream_misc);
+
+	for (j = 0; j < ARRAY_SIZE(algs); j++)
+		crypto_unregister_alg(&algs[j]);

 	tasklet_kill(&dev->done_task);
 	tasklet_kill(&dev->queue_task);

-	for (j = 0; j < ARRAY_SIZE(algs); j++)
-		crypto_unregister_alg(&algs[j]);
+	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+			  dev->payload_base_dma);

-	misc_deregister(&dev->dcp_bootstream_misc);
+	dma_free_coherent(&pdev->dev,
+			  DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+			  dev->hw_pkg[0], dev->hw_phys_pkg);

 	return 0;
 }
...
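
The reason the err_free_irq labels could disappear above: resources acquired through devm_* helpers are tied to the device and released automatically, in reverse order, when probe fails or the device goes away. A minimal sketch of that idiom, using a fictional "foo" device (foo_probe and foo_irq_handler are illustrative names, not from this patch):

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		void __iomem *base = devm_ioremap_resource(&pdev->dev, r);
		int irq, ret;

		if (IS_ERR(base))
			return PTR_ERR(base);	/* nothing to unwind by hand */

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
				       "foo", pdev);
		if (ret)
			return ret;	/* the ioremap is undone automatically */

		return 0;
	}

Note that devm_ioremap_resource() also folds in the NULL-resource check, which is why the explicit "failed to get IORESOURCE_MEM" branch was dropped.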
@@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			unsigned int keylen)
 {
 	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
-	struct crypto_authenc_key_param *param;
+	struct crypto_authenc_keys keys;

-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;

-	param = RTA_DATA(rta);
-	ctx->enckey_len = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	if (keys.authkeylen > sizeof(ctx->authkey))
+		goto badkey;

-	if (keylen < ctx->enckey_len)
+	if (keys.enckeylen > sizeof(ctx->enckey))
 		goto badkey;

-	ctx->authkey_len = keylen - ctx->enckey_len;
-	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
-	memcpy(ctx->authkey, key, ctx->authkey_len);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+	ctx->authkey_len = keys.authkeylen;
+	ctx->enckey_len = keys.enckeylen;

 	return aead_setup(tfm, crypto_aead_authsize(tfm));
 badkey:
-	ctx->enckey_len = 0;
 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
...
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
 	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
 }

-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
 {
 	u32 val;
@@ -928,7 +928,7 @@ static irqreturn_t crypto_int(int irq, void *priv)
 	return IRQ_HANDLED;
 }

-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
 	.cra_name		= "ecb(aes)",
 	.cra_driver_name	= "mv-ecb-aes",
 	.cra_priority	= 300,
@@ -951,7 +951,7 @@ static struct crypto_alg mv_aes_alg_ecb = {
 	},
 };

-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
 	.cra_name		= "cbc(aes)",
 	.cra_driver_name	= "mv-cbc-aes",
 	.cra_priority	= 300,
@@ -975,7 +975,7 @@ static struct crypto_alg mv_aes_alg_cbc = {
 	},
 };

-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
 	.init = mv_hash_init,
 	.update = mv_hash_update,
 	.final = mv_hash_final,
@@ -999,7 +999,7 @@ static struct ahash_alg mv_sha1_alg = {
 	}
 };

-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
 	.init = mv_hash_init,
 	.update = mv_hash_update,
 	.final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
 		goto err_unmap_sram;
 	}

-	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
 			  cp);
 	if (ret)
 		goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "mv_crypto",
-		.of_match_table = of_match_ptr(mv_cesa_of_match_table),
+		.of_match_table = mv_cesa_of_match_table,
 	},
 };
 MODULE_ALIAS("platform:mv_crypto");
...
@@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 	if (dd->flags & FLAGS_CBC)
 		val |= AES_REG_CTRL_CBC;
 	if (dd->flags & FLAGS_CTR) {
-		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
 		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
 	}
 	if (dd->flags & FLAGS_ENCRYPT)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 	return err;
 }

-int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg)
 {
 	while (sg) {
 		if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ static int omap_aes_check_aligned(struct scatterlist *sg)
 	return 0;
 }

-int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 {
 	void *buf_in, *buf_out;
 	int pages;
...
@@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver);
 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Dmitry Kasatkin");
+MODULE_ALIAS("platform:omap-sham");
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 {
 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	unsigned int authkeylen, enckeylen;
+	struct crypto_authenc_keys keys;
 	int err = -EINVAL;

-	if (!RTA_OK(rta, keylen))
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;

-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+	if (keys.enckeylen > AES_MAX_KEY_SIZE)
 		goto badkey;

-	if (RTA_PAYLOAD(rta) < sizeof(*param))
-		goto badkey;
-
-	param = RTA_DATA(rta);
-	enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < enckeylen)
-		goto badkey;
-
-	authkeylen = keylen - enckeylen;
-
-	if (enckeylen > AES_MAX_KEY_SIZE)
+	if (keys.authkeylen > sizeof(ctx->hash_ctx))
 		goto badkey;

 	if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
 	    SPA_CTRL_CIPH_ALG_AES)
-		err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+		err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
 	else
-		err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+		err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);

 	if (err)
 		goto badkey;

-	memcpy(ctx->hash_ctx, key, authkeylen);
-	ctx->hash_key_len = authkeylen;
+	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
+	ctx->hash_key_len = keys.authkeylen;

 	return 0;
...
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
 	.driver		= {
 		.name	= SAHARA_NAME,
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(sahara_dt_ids),
+		.of_match_table = sahara_dt_ids,
 	},
 	.id_table = sahara_platform_ids,
 };
...
@@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc,
 		       const u8 *key, unsigned int keylen)
 {
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	unsigned int authkeylen;
-	unsigned int enckeylen;
-
-	if (!RTA_OK(rta, keylen))
-		goto badkey;
+	struct crypto_authenc_keys keys;

-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;

-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 		goto badkey;

-	param = RTA_DATA(rta);
-	enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	memcpy(ctx->key, keys.authkey, keys.authkeylen);
+	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

-	if (keylen < enckeylen)
-		goto badkey;
-
-	authkeylen = keylen - enckeylen;
-
-	if (keylen > TALITOS_MAX_KEY_SIZE)
-		goto badkey;
-
-	memcpy(&ctx->key, key, keylen);
-
-	ctx->keylen = keylen;
-	ctx->enckeylen = enckeylen;
-	ctx->authkeylen = authkeylen;
+	ctx->keylen = keys.authkeylen + keys.enckeylen;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;

 	return 0;
...
@@ -27,6 +27,8 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */

+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
 static DECLARE_WORK(aes_work, aes_workqueue_handler);
 static struct workqueue_struct *aes_wq;

-extern unsigned long long tegra_chip_uid(void);
-
 static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
 {
 	return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
 	struct tegra_aes_dev *dd = aes_dev;
 	struct tegra_aes_ctx *ctx = &rng_ctx;
 	struct tegra_aes_slot *key_slot;
-	struct timespec ts;
 	int ret = 0;
-	u64 nsec, tmp[2];
+	u8 tmp[16];	/* 16 bytes = 128 bits of entropy */
 	u8 *dt;

 	if (!ctx || !dd) {
-		dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
+		pr_err("ctx=0x%x, dd=0x%x\n",
 			(unsigned int)ctx, (unsigned int)dd);
 		return -EINVAL;
 	}
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
 	if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
 		dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
 	} else {
-		getnstimeofday(&ts);
-		nsec = timespec_to_ns(&ts);
-		do_div(nsec, 1000);
-		nsec ^= dd->ctr << 56;
-		dd->ctr++;
-		tmp[0] = nsec;
-		tmp[1] = tegra_chip_uid();
-		dt = (u8 *)tmp;
+		get_random_bytes(tmp, sizeof(tmp));
+		dt = tmp;
 	}

 	memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }

-void tegra_aes_cra_exit(struct crypto_tfm *tfm)
+static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
 {
 	struct tegra_aes_ctx *ctx =
 		crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
 	}

 	/* Initialize the vde clock */
-	dd->aes_clk = clk_get(dev, "vde");
+	dd->aes_clk = devm_clk_get(dev, "vde");
 	if (IS_ERR(dd->aes_clk)) {
 		dev_err(dev, "iclock intialization failed.\n");
 		err = -ENODEV;
@@ -1033,8 +1026,6 @@ static int tegra_aes_probe(struct platform_device *pdev)
 	if (dd->buf_out)
 		dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
 				  dd->buf_out, dd->dma_buf_out);
-	if (!IS_ERR(dd->aes_clk))
-		clk_put(dd->aes_clk);
 	if (aes_wq)
 		destroy_workqueue(aes_wq);
 	spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
 			  dd->buf_in, dd->dma_buf_in);
 	dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
 			  dd->buf_out, dd->dma_buf_out);
-	clk_put(dd->aes_clk);
 	aes_dev = NULL;

 	return 0;
...
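
The seeding change above is worth spelling out: a microsecond timestamp XORed with a counter and the chip UID is largely guessable, which is a poor DT vector for an ANSI X9.31-style generator. The replacement draws the 16 bytes from the kernel entropy pool. A minimal sketch of the new seeding step, with my_fill_dt as an illustrative wrapper name:

	#include <linux/random.h>

	/* Illustrative only: fill an X9.31 DT vector from the entropy pool
	 * instead of a predictable timestamp + chip UID mix. */
	static void my_fill_dt(u8 *dt, size_t len)
	{
		get_random_bytes(dt, len);	/* pool-backed, unpredictable */
	}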
#include <linux/hardirq.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
* instructions or access the SIMD register file
*
* As architectures typically don't preserve the SIMD register file when
* taking an interrupt, !in_interrupt() should be a reasonable default.
*/
static __must_check inline bool may_use_simd(void)
{
return !in_interrupt();
}
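
A hypothetical caller pattern for may_use_simd(), shown for illustration: the SIMD path is taken only when the register file is usable, with a scalar fallback for interrupt context. Here kernel_neon_begin()/kernel_neon_end() are the real ARM entry points, while my_xor_block_neon and my_xor_block_scalar are stand-in names for two implementations the caller would provide:

	static void my_xor_block(u8 *dst, const u8 *src, unsigned int len)
	{
		if (may_use_simd()) {
			kernel_neon_begin();	/* arch-specific SIMD enter (ARM) */
			my_xor_block_neon(dst, src, len);
			kernel_neon_end();
		} else {
			my_xor_block_scalar(dst, src, len);
		}
	}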
@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask)
 	return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
 }

+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ *		   timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
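
The point of crypto_memneq() is that its running time does not depend on where the first differing byte sits; memcmp() returns at the first mismatch, leaking the length of the matching prefix through timing, which is exploitable against MAC checks. A hedged sketch of the intended call site, with my_verify_tag and the icv parameter names as illustrative stand-ins:

	/* Authentication-tag check: timing-safe comparison */
	static int my_verify_tag(const u8 *computed_icv, const u8 *received_icv,
				 unsigned int authsize)
	{
		/* crypto_memneq() always touches all 'authsize' bytes,
		 * so a mismatch reveals nothing about its position */
		return crypto_memneq(computed_icv, received_icv, authsize) ?
		       -EBADMSG : 0;
	}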
@@ -23,5 +23,15 @@ struct crypto_authenc_key_param {
 	__be32 enckeylen;
 };

+struct crypto_authenc_keys {
+	const u8 *authkey;
+	const u8 *enckey;
+
+	unsigned int authkeylen;
+	unsigned int enckeylen;
+};
+
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+			       unsigned int keylen);
+
 #endif /* _CRYPTO_AUTHENC_H */
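
This helper is what lets the talitos, picoxcell, and ixp4xx setkey paths above drop their hand-rolled rtattr parsing: it validates the rtattr-encoded authenc() key blob and returns pointers into it (authkey followed by enckey), so each driver only checks its own length limits. A hedged sketch of a setkey built on it; my_aead_ctx, my_aead_setkey, and the 64/32 bounds are illustrative, not from any driver in this pull:

	struct my_aead_ctx {
		u8 authkey[64];
		u8 enckey[32];
		unsigned int authkeylen;
		unsigned int enckeylen;
	};

	static int my_aead_setkey(struct crypto_aead *tfm, const u8 *key,
				  unsigned int keylen)
	{
		struct my_aead_ctx *ctx = crypto_aead_ctx(tfm);
		struct crypto_authenc_keys keys;

		/* split the blob into authkey/enckey views; 0 on success */
		if (crypto_authenc_extractkeys(&keys, key, keylen))
			goto badkey;
		if (keys.authkeylen > sizeof(ctx->authkey) ||
		    keys.enckeylen > sizeof(ctx->enckey))
			goto badkey;

		memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
		memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
		ctx->authkeylen = keys.authkeylen;
		ctx->enckeylen = keys.enckeylen;
		return 0;

	badkey:
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}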
@@ -129,10 +129,9 @@ struct parallel_data {
 	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			reorder_objects;
 	atomic_t			refcnt;
+	atomic_t			seq_nr;
 	struct padata_cpumask		cpumask;
 	spinlock_t			lock ____cacheline_aligned;
-	spinlock_t			seq_lock;
-	unsigned int			seq_nr;
 	unsigned int			processed;
 	struct timer_list		timer;
 };
...
@@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)

 static int padata_cpu_hash(struct parallel_data *pd)
 {
+	unsigned int seq_nr;
 	int cpu_index;

 	/*
@@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd)
 	 * seq_nr mod. number of cpus in use.
 	 */

-	spin_lock(&pd->seq_lock);
-	cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
-	pd->seq_nr++;
-	spin_unlock(&pd->seq_lock);
+	seq_nr = atomic_inc_return(&pd->seq_nr);
+	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

 	return padata_index_to_cpu(pd, cpu_index);
 }
@@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 	padata_init_pqueues(pd);
 	padata_init_squeues(pd);
 	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
-	pd->seq_nr = 0;
+	atomic_set(&pd->seq_nr, -1);
 	atomic_set(&pd->seq_nr, -1) is paired with atomic_inc_return() so the first job gets ticket 0.
 	atomic_set(&pd->reorder_objects, 0);
 	atomic_set(&pd->refcnt, 0);
 	pd->pinst = pinst;
...
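
The padata change replaces a spinlock-protected counter with an atomic one: each submitter draws a unique ticket via atomic_inc_return() and maps it onto a CPU round-robin, with no lock contention on the fast path. The counter is initialized to -1 so the first increment yields 0. A minimal sketch of the selection logic under those assumptions (my_cpu_hash is an illustrative name):

	/* Lock-free round-robin CPU selection, as in padata_cpu_hash() above */
	static int my_cpu_hash(atomic_t *seq_nr, unsigned int ncpus)
	{
		/* each caller gets a unique, monotonically increasing ticket;
		 * modulo the CPU count turns it into a round-robin index */
		return atomic_inc_return(seq_nr) % ncpus;
	}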