Commit 639b4ac6 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6 into next

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 3.16:

   - Added test vectors for SHA/AES-CCM/DES-CBC/3DES-CBC.
   - Fixed a number of error-path memory leaks in tcrypt.
   - Fixed error-path memory leak in caam.
   - Removed unnecessary global mutex from mxs-dcp.
   - Added ahash walk interface that can actually be asynchronous.
   - Cleaned up caam error reporting.
   - Allowed crypto_user get operation to be used by non-root users.
   - Added support for the SSS module on Exynos.
   - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6: (60 commits)
  crypto: testmgr - add aead cbc des, des3_ede tests
  crypto: testmgr - Fix DMA-API warning
  crypto: cesa - Don't use tfm->__crt_alg->cra_type directly
  crypto: sahara - Don't use tfm->__crt_alg->cra_name directly
  crypto: padlock - Don't use tfm->__crt_alg->cra_name directly
  crypto: n2 - Don't use tfm->__crt_alg->cra_name directly
  crypto: dcp - Don't use tfm->__crt_alg->cra_name directly
  crypto: cesa - Don't use tfm->__crt_alg->cra_name directly
  crypto: ccp - Don't use tfm->__crt_alg->cra_name directly
  crypto: geode - Don't use tfm->__crt_alg->cra_name directly
  crypto: geode - Weed out printk() from probe()
  crypto: geode - Consistently use AES_KEYSIZE_128
  crypto: geode - Kill AES_IV_LENGTH
  crypto: geode - Kill AES_MIN_BLOCK_SIZE
  crypto: mxs-dcp - Remove global mutex
  crypto: hash - Add real ahash walk interface
  hwrng: n2-drv - Introduce the use of the managed version of kzalloc
  crypto: caam - reinitialize keys_fit_inline for decrypt and givencrypt
  crypto: s5p-sss - fix multiplatform build
  hwrng: timeriomem - remove unnecessary OOM messages
  ...
parents 9d2cd01b 5208ed2c
Samsung SoC SSS (Security SubSystem) module

The SSS module in the S5PV210 SoC supports the following:
-- Feeder (FeedCtrl)
-- Advanced Encryption Standard (AES)
-- Data Encryption Standard (DES)/3DES
-- Public Key Accelerator (PKA)
-- SHA-1/SHA-256/MD5/HMAC (SHA-1/SHA-256/MD5)/PRNG
-- PRNG: Pseudo Random Number Generator

The SSS module in Exynos4 (Exynos4210) and Exynos5 (Exynos5420 and
Exynos5250) SoCs also supports the following:
-- ARCFOUR (ARC4)
-- True Random Number Generator (TRNG)
-- Secure Key Manager

Required properties:

- compatible : Should contain entries for this and backward compatible
  SSS versions:
  - "samsung,s5pv210-secss" for the S5PV210 SoC.
  - "samsung,exynos4210-secss" for the Exynos4210, Exynos4212, Exynos4412,
    Exynos5250, Exynos5260 and Exynos5420 SoCs.
- reg : Offset and length of the register set for the module.
- interrupts : interrupt specifiers of SSS module interrupts; should contain
  the following entries:
  - first : feed control interrupt (required for all variants),
  - second : hash interrupt (required only for samsung,s5pv210-secss).
- clocks : list of clock phandle and specifier pairs for all clocks listed in
  the clock-names property.
- clock-names : list of device clock input names; should contain one entry,
  "secss".
......@@ -92,7 +92,7 @@ __clmul_gf128mul_ble:
ret
ENDPROC(__clmul_gf128mul_ble)
/* void clmul_ghash_mul(char *dst, const be128 *shash) */
/* void clmul_ghash_mul(char *dst, const u128 *shash) */
ENTRY(clmul_ghash_mul)
movups (%rdi), DATA
movups (%rsi), SHASH
......@@ -106,7 +106,7 @@ ENDPROC(clmul_ghash_mul)
/*
* void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
* const be128 *shash);
* const u128 *shash);
*/
ENTRY(clmul_ghash_update)
cmp $16, %rdx
......
......@@ -25,17 +25,17 @@
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
void clmul_ghash_mul(char *dst, const be128 *shash);
void clmul_ghash_mul(char *dst, const u128 *shash);
void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
const be128 *shash);
const u128 *shash);
struct ghash_async_ctx {
struct cryptd_ahash *cryptd_tfm;
};
struct ghash_ctx {
be128 shash;
u128 shash;
};
struct ghash_desc_ctx {
......@@ -68,11 +68,11 @@ static int ghash_setkey(struct crypto_shash *tfm,
a = be64_to_cpu(x->a);
b = be64_to_cpu(x->b);
ctx->shash.a = (__be64)((b << 1) | (a >> 63));
ctx->shash.b = (__be64)((a << 1) | (b >> 63));
ctx->shash.a = (b << 1) | (a >> 63);
ctx->shash.b = (a << 1) | (b >> 63);
if (a >> 63)
ctx->shash.b ^= cpu_to_be64(0xc2);
ctx->shash.b ^= ((u64)0xc2) << 56;
return 0;
}
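
An aside on the arithmetic in the setkey hunk above (my reading, not text
from the commit): a and b are the two 64-bit halves of the hash key H, and
the shifts compute the doubling of H in GF(2^128) that the PCLMULQDQ code
expects, folding the bit-reflected GHASH reduction constant back in on
carry-out. Restated as a standalone helper using exactly the operations of
the new code:

/* Hedged restatement of the key transform in ghash_setkey() above. */
static void ghash_key_twist(u64 *a, u64 *b)
{
	u64 ta = *a, tb = *b;

	*a = (tb << 1) | (ta >> 63);
	*b = (ta << 1) | (tb >> 63);
	if (ta >> 63)			/* carry out of the top bit */
		*b ^= (u64)0xc2 << 56;	/* fold in the reduction constant */
}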
......
......@@ -15,6 +15,7 @@
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
......@@ -46,7 +47,10 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_atomic(walk->pg);
if (walk->flags & CRYPTO_ALG_ASYNC)
walk->data = kmap(walk->pg);
else
walk->data = kmap_atomic(walk->pg);
walk->data += offset;
if (offset & alignmask) {
......@@ -93,8 +97,16 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
return nbytes;
}
kunmap_atomic(walk->data);
crypto_yield(walk->flags);
if (walk->flags & CRYPTO_ALG_ASYNC)
kunmap(walk->pg);
else {
kunmap_atomic(walk->data);
/*
* The may sleep test only makes sense for sync users.
* Async users don't need to sleep here anyway.
*/
crypto_yield(walk->flags);
}
if (err)
return err;
......@@ -124,12 +136,31 @@ int crypto_hash_walk_first(struct ahash_request *req,
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
int crypto_ahash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total)
return 0;
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
walk->flags |= CRYPTO_ALG_ASYNC;
BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
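
For context, a sketch of how a driver would consume the new asynchronous
walk (example_ahash_update() is a hypothetical callback; the loop shape
follows the existing crypto_hash_walk_* convention):

/*
 * Hedged usage sketch for crypto_ahash_walk_first(). Because the async
 * walk maps pages with kmap() rather than kmap_atomic(), the loop body
 * is allowed to sleep, e.g. while waiting for hardware.
 */
static int example_ahash_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_ahash_walk_first(req, &walk);
	     nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0)) {
		/* feed (walk.data, nbytes) to the hash engine here */
	}

	return nbytes;	/* 0 on success, negative errno on failure */
}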
int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
struct crypto_hash_walk *walk,
struct scatterlist *sg, unsigned int len)
......@@ -141,7 +172,7 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
walk->sg = sg;
walk->flags = hdesc->flags;
walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
return hash_walk_new_entry(walk);
}
......
......@@ -265,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
LIST_HEAD(list);
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
......@@ -295,6 +298,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
......@@ -379,6 +385,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct crypto_user_alg *p = nlmsg_data(nlh);
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
......@@ -466,9 +475,6 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
type -= CRYPTO_MSG_BASE;
link = &crypto_dispatch[type];
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
struct crypto_alg *alg;
......
......@@ -282,6 +282,11 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
unsigned int *b_size;
unsigned int iv_len;
if (aad_size >= PAGE_SIZE) {
pr_err("associate data length (%u) too big\n", aad_size);
return;
}
if (enc == ENCRYPT)
e = "encryption";
else
......@@ -308,14 +313,14 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
if (IS_ERR(tfm)) {
pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
goto out_notfm;
}
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("alg: aead: Failed to allocate request for %s\n",
algo);
goto out;
goto out_noreq;
}
i = 0;
......@@ -323,14 +328,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
b_size = aead_sizes;
do {
assoc = axbuf[0];
if (aad_size < PAGE_SIZE)
memset(assoc, 0xff, aad_size);
else {
pr_err("associate data length (%u) too big\n",
aad_size);
goto out_nosg;
}
memset(assoc, 0xff, aad_size);
sg_init_one(&asg[0], assoc, aad_size);
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
......@@ -392,7 +390,10 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
} while (*keysize);
out:
aead_request_free(req);
out_noreq:
crypto_free_aead(tfm);
out_notfm:
kfree(sg);
out_nosg:
testmgr_free_buf(xoutbuf);
......@@ -1518,7 +1519,36 @@ static int do_test(int m)
case 157:
ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
break;
case 181:
ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
break;
case 182:
ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))");
break;
case 183:
ret += tcrypt_test("authenc(hmac(sha224),cbc(des))");
break;
case 184:
ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))");
break;
case 185:
ret += tcrypt_test("authenc(hmac(sha256),cbc(des))");
break;
case 186:
ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))");
break;
case 187:
ret += tcrypt_test("authenc(hmac(sha384),cbc(des))");
break;
case 188:
ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))");
break;
case 189:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des))");
break;
case 190:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
......
......@@ -414,16 +414,18 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
void *input;
void *output;
void *assoc;
char iv[MAX_IVLEN];
char *iv;
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
if (!iv)
return ret;
if (testmgr_alloc_buf(xbuf))
goto out_noxbuf;
if (testmgr_alloc_buf(axbuf))
goto out_noaxbuf;
if (diff_dst && testmgr_alloc_buf(xoutbuf))
goto out_nooutbuf;
......@@ -767,6 +769,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
kfree(iv);
return ret;
}
......@@ -1831,8 +1834,38 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = {
.enc = {
.vecs = hmac_sha1_aes_cbc_enc_tv_template,
.count = HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS
.vecs =
hmac_sha1_aes_cbc_enc_tv_temp,
.count =
HMAC_SHA1_AES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha1),cbc(des))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha1_des_cbc_enc_tv_temp,
.count =
HMAC_SHA1_DES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha1),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha1_des3_ede_cbc_enc_tv_temp,
.count =
HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
}
}
}
......@@ -1843,12 +1876,44 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = {
.enc = {
.vecs = hmac_sha1_ecb_cipher_null_enc_tv_template,
.count = HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS
.vecs =
hmac_sha1_ecb_cipher_null_enc_tv_temp,
.count =
HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
},
.dec = {
.vecs = hmac_sha1_ecb_cipher_null_dec_tv_template,
.count = HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS
.vecs =
hmac_sha1_ecb_cipher_null_dec_tv_temp,
.count =
HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha224),cbc(des))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha224_des_cbc_enc_tv_temp,
.count =
HMAC_SHA224_DES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha224),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha224_des3_ede_cbc_enc_tv_temp,
.count =
HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
}
}
}
......@@ -1859,8 +1924,66 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = {
.enc = {
.vecs = hmac_sha256_aes_cbc_enc_tv_template,
.count = HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS
.vecs =
hmac_sha256_aes_cbc_enc_tv_temp,
.count =
HMAC_SHA256_AES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha256),cbc(des))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha256_des_cbc_enc_tv_temp,
.count =
HMAC_SHA256_DES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha256),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha256_des3_ede_cbc_enc_tv_temp,
.count =
HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha384),cbc(des))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha384_des_cbc_enc_tv_temp,
.count =
HMAC_SHA384_DES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha384),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha384_des3_ede_cbc_enc_tv_temp,
.count =
HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
}
}
}
......@@ -1871,8 +1994,38 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = {
.enc = {
.vecs = hmac_sha512_aes_cbc_enc_tv_template,
.count = HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS
.vecs =
hmac_sha512_aes_cbc_enc_tv_temp,
.count =
HMAC_SHA512_AES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha512),cbc(des))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha512_des_cbc_enc_tv_temp,
.count =
HMAC_SHA512_DES_CBC_ENC_TEST_VEC
}
}
}
}, {
.alg = "authenc(hmac(sha512),cbc(des3_ede))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs =
hmac_sha512_des3_ede_cbc_enc_tv_temp,
.count =
HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
}
}
}
......@@ -3273,8 +3426,8 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
if (fips_enabled && !rc)
printk(KERN_INFO "alg: self-tests for %s (%s) passed\n",
driver, alg);
pr_info(KERN_INFO "alg: self-tests for %s (%s) passed\n",
driver, alg);
return rc;
......
......@@ -2,7 +2,7 @@
# Hardware Random Number Generator (RNG) configuration
#
config HW_RANDOM
menuconfig HW_RANDOM
tristate "Hardware Random Number Generator Core support"
default m
---help---
......@@ -20,9 +20,11 @@ config HW_RANDOM
If unsure, say Y.
if HW_RANDOM
config HW_RANDOM_TIMERIOMEM
tristate "Timer IOMEM HW Random Number Generator support"
depends on HW_RANDOM && HAS_IOMEM
depends on HAS_IOMEM
---help---
This driver provides kernel-side support for a generic Random
Number Generator used by reading a 'dumb' iomem address that
......@@ -36,7 +38,7 @@ config HW_RANDOM_TIMERIOMEM
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
depends on HW_RANDOM && (X86 || IA64) && PCI
depends on (X86 || IA64) && PCI
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -49,7 +51,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
depends on HW_RANDOM && (X86 || PPC_MAPLE) && PCI
depends on (X86 || PPC_MAPLE) && PCI
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -62,8 +64,8 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on HW_RANDOM && HAVE_CLK
default (HW_RANDOM && ARCH_AT91)
depends on ARCH_AT91 && HAVE_CLK
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Atmel AT91 devices.
......@@ -75,7 +77,7 @@ config HW_RANDOM_ATMEL
config HW_RANDOM_BCM63XX
tristate "Broadcom BCM63xx Random Number Generator support"
depends on HW_RANDOM && BCM63XX
depends on BCM63XX
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -88,7 +90,7 @@ config HW_RANDOM_BCM63XX
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835 Random Number Generator support"
depends on HW_RANDOM && ARCH_BCM2835
depends on ARCH_BCM2835
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -101,7 +103,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on HW_RANDOM && X86_32 && PCI
depends on X86_32 && PCI
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -114,7 +116,7 @@ config HW_RANDOM_GEODE
config HW_RANDOM_N2RNG
tristate "Niagara2 Random Number Generator support"
depends on HW_RANDOM && SPARC64
depends on SPARC64
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -127,7 +129,7 @@ config HW_RANDOM_N2RNG
config HW_RANDOM_VIA
tristate "VIA HW Random Number Generator support"
depends on HW_RANDOM && X86
depends on X86
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -140,7 +142,7 @@ config HW_RANDOM_VIA
config HW_RANDOM_IXP4XX
tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
depends on HW_RANDOM && ARCH_IXP4XX
depends on ARCH_IXP4XX
default HW_RANDOM
---help---
This driver provides kernel-side support for the Pseudo-Random
......@@ -153,7 +155,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -167,7 +169,7 @@ config HW_RANDOM_OMAP
config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
depends on HW_RANDOM && ARCH_OMAP3
depends on ARCH_OMAP3
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -180,7 +182,7 @@ config HW_RANDOM_OMAP3_ROM
config HW_RANDOM_OCTEON
tristate "Octeon Random Number Generator support"
depends on HW_RANDOM && CAVIUM_OCTEON_SOC
depends on CAVIUM_OCTEON_SOC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -193,7 +195,7 @@ config HW_RANDOM_OCTEON
config HW_RANDOM_PASEMI
tristate "PA Semi HW Random Number Generator support"
depends on HW_RANDOM && PPC_PASEMI
depends on PPC_PASEMI
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -206,7 +208,7 @@ config HW_RANDOM_PASEMI
config HW_RANDOM_VIRTIO
tristate "VirtIO Random Number Generator support"
depends on HW_RANDOM && VIRTIO
depends on VIRTIO
---help---
This driver provides kernel-side support for the virtual Random Number
Generator hardware.
......@@ -216,7 +218,7 @@ config HW_RANDOM_VIRTIO
config HW_RANDOM_TX4939
tristate "TX4939 Random Number Generator support"
depends on HW_RANDOM && SOC_TX4939
depends on SOC_TX4939
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -229,7 +231,8 @@ config HW_RANDOM_TX4939
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
depends on HW_RANDOM && ARCH_HAS_RNGA
depends on ARCH_HAS_RNGA
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Freescale i.MX processors.
......@@ -241,7 +244,8 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on HW_RANDOM && ARCH_NOMADIK
depends on ARCH_NOMADIK
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
......@@ -251,21 +255,10 @@ config HW_RANDOM_NOMADIK
If unsure, say Y.
config HW_RANDOM_PICOXCELL
tristate "Picochip picoXcell true random number generator support"
depends on HW_RANDOM && ARCH_PICOXCELL && PICOXCELL_PC3X3
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Picochip PC3x3 and later devices.
To compile this driver as a module, choose M here: the
module will be called picoxcell-rng.
If unsure, say Y.
config HW_RANDOM_PPC4XX
tristate "PowerPC 4xx generic true random number generator support"
depends on HW_RANDOM && PPC && 4xx
depends on PPC && 4xx
default HW_RANDOM
---help---
This driver provides the kernel-side support for the TRNG hardware
found in the security function of some PowerPC 4xx SoCs.
......@@ -275,24 +268,9 @@ config HW_RANDOM_PPC4XX
If unsure, say N.
config UML_RANDOM
depends on UML
tristate "Hardware random number generator"
help
This option enables UML's "hardware" random number generator. It
attaches itself to the host's /dev/random, supplying as much entropy
as the host has, rather than the small amount the UML gets from its
own drivers. It registers itself as a standard hardware random number
generator, major 10, minor 183, and the canonical device name is
/dev/hwrng.
The way to make use of this is to install the rng-tools package
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
config HW_RANDOM_PSERIES
tristate "pSeries HW Random Number Generator support"
depends on HW_RANDOM && PPC64 && IBMVIO
depends on PPC64 && IBMVIO
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -305,7 +283,7 @@ config HW_RANDOM_PSERIES
config HW_RANDOM_POWERNV
tristate "PowerNV Random Number Generator support"
depends on HW_RANDOM && PPC_POWERNV
depends on PPC_POWERNV
default HW_RANDOM
---help---
This is the driver for Random Number Generator hardware found
......@@ -318,7 +296,8 @@ config HW_RANDOM_POWERNV
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
depends on ARCH_EXYNOS
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on EXYNOS SOCs.
......@@ -330,7 +309,7 @@ config HW_RANDOM_EXYNOS
config HW_RANDOM_TPM
tristate "TPM HW Random Number Generator support"
depends on HW_RANDOM && TCG_TPM
depends on TCG_TPM
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
......@@ -344,6 +323,7 @@ config HW_RANDOM_TPM
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Qualcomm SoCs.
......@@ -352,3 +332,20 @@ config HW_RANDOM_MSM
module will be called msm-rng.
If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
depends on UML
tristate "Hardware random number generator"
help
This option enables UML's "hardware" random number generator. It
attaches itself to the host's /dev/random, supplying as much entropy
as the host has, rather than the small amount the UML gets from its
own drivers. It registers itself as a standard hardware random number
generator, major 10, minor 183, and the canonical device name is
/dev/hwrng.
The way to make use of this is to install the rng-tools package
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
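
Taken together, the hunks above convert HW_RANDOM from a plain config
symbol into a menuconfig and wrap the per-driver entries in an if/endif
block, so each driver drops its explicit "depends on HW_RANDOM"
(UML_RANDOM, which does not depend on the core, moves outside the block).
A minimal sketch of the resulting pattern, with HW_RANDOM_FOO a made-up
example entry:

menuconfig HW_RANDOM
	tristate "Hardware Random Number Generator Core support"
	default m

if HW_RANDOM

config HW_RANDOM_FOO
	tristate "Foo HW Random Number Generator support"
	depends on ARCH_FOO
	default HW_RANDOM

endif # HW_RANDOM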
......@@ -22,7 +22,6 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
......
......@@ -632,7 +632,7 @@ static int n2rng_probe(struct platform_device *op)
multi_capable = (match->data != NULL);
n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);
np = devm_kzalloc(&op->dev, sizeof(*np), GFP_KERNEL);
if (!np)
goto out;
np->op = op;
......@@ -653,7 +653,7 @@ static int n2rng_probe(struct platform_device *op)
&np->hvapi_minor)) {
dev_err(&op->dev, "Cannot register suitable "
"HVAPI version.\n");
goto out_free;
goto out;
}
}
......@@ -676,15 +676,16 @@ static int n2rng_probe(struct platform_device *op)
dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
np->hvapi_major, np->hvapi_minor);
np->units = kzalloc(sizeof(struct n2rng_unit) * np->num_units,
GFP_KERNEL);
np->units = devm_kzalloc(&op->dev,
sizeof(struct n2rng_unit) * np->num_units,
GFP_KERNEL);
err = -ENOMEM;
if (!np->units)
goto out_hvapi_unregister;
err = n2rng_init_control(np);
if (err)
goto out_free_units;
goto out_hvapi_unregister;
dev_info(&op->dev, "Found %s RNG, units: %d\n",
((np->flags & N2RNG_FLAG_MULTI) ?
......@@ -697,7 +698,7 @@ static int n2rng_probe(struct platform_device *op)
err = hwrng_register(&np->hwrng);
if (err)
goto out_free_units;
goto out_hvapi_unregister;
platform_set_drvdata(op, np);
......@@ -705,15 +706,9 @@ static int n2rng_probe(struct platform_device *op)
return 0;
out_free_units:
kfree(np->units);
np->units = NULL;
out_hvapi_unregister:
sun4v_hvapi_unregister(HV_GRP_RNG);
out_free:
kfree(np);
out:
return err;
}
......@@ -730,11 +725,6 @@ static int n2rng_remove(struct platform_device *op)
sun4v_hvapi_unregister(HV_GRP_RNG);
kfree(np->units);
np->units = NULL;
kfree(np);
return 0;
}
......
......@@ -369,10 +369,8 @@ static int omap_rng_probe(struct platform_device *pdev)
int ret;
priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "could not allocate memory\n");
if (!priv)
return -ENOMEM;
};
omap_rng_ops.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
......
/*
* Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* All enquiries to support@picochip.com
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define DATA_REG_OFFSET 0x0200
#define CSR_REG_OFFSET 0x0278
#define CSR_OUT_EMPTY_MASK (1 << 24)
#define CSR_FAULT_MASK (1 << 1)
#define TRNG_BLOCK_RESET_MASK (1 << 0)
#define TAI_REG_OFFSET 0x0380
/*
* The maximum amount of time in microseconds to spend waiting for data if the
* core wants us to wait. The TRNG should generate 32 bits every 320ns so a
* timeout of 20us seems reasonable. The TRNG does builtin tests of the data
* for randomness so we can't always assume there is data present.
*/
#define PICO_TRNG_TIMEOUT 20
static void __iomem *rng_base;
static struct clk *rng_clk;
static struct device *rng_dev;
static inline u32 picoxcell_trng_read_csr(void)
{
return __raw_readl(rng_base + CSR_REG_OFFSET);
}
static inline bool picoxcell_trng_is_empty(void)
{
return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK;
}
/*
* Take the random number generator out of reset and make sure the interrupts
* are masked. We shouldn't need to get large amounts of random bytes so just
* poll the status register. The hardware generates 32 bits every 320ns so we
* shouldn't have to wait long enough to warrant waiting for an IRQ.
*/
static void picoxcell_trng_start(void)
{
__raw_writel(0, rng_base + TAI_REG_OFFSET);
__raw_writel(0, rng_base + CSR_REG_OFFSET);
}
static void picoxcell_trng_reset(void)
{
__raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET);
__raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET);
picoxcell_trng_start();
}
/*
* Get some random data from the random number generator. The hw_random core
* layer provides us with locking.
*/
static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
int i;
/* Wait for some data to become available. */
for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) {
if (!wait)
return 0;
udelay(1);
}
if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) {
dev_err(rng_dev, "fault detected, resetting TRNG\n");
picoxcell_trng_reset();
return -EIO;
}
if (i == PICO_TRNG_TIMEOUT)
return 0;
*(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET);
return sizeof(u32);
}
static struct hwrng picoxcell_trng = {
.name = "picoxcell",
.read = picoxcell_trng_read,
};
static int picoxcell_trng_probe(struct platform_device *pdev)
{
int ret;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rng_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(rng_base))
return PTR_ERR(rng_base);
rng_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rng_clk)) {
dev_warn(&pdev->dev, "no clk\n");
return PTR_ERR(rng_clk);
}
ret = clk_enable(rng_clk);
if (ret) {
dev_warn(&pdev->dev, "unable to enable clk\n");
return ret;
}
picoxcell_trng_start();
ret = hwrng_register(&picoxcell_trng);
if (ret)
goto err_register;
rng_dev = &pdev->dev;
dev_info(&pdev->dev, "pixoxcell random number generator active\n");
return 0;
err_register:
clk_disable(rng_clk);
return ret;
}
static int picoxcell_trng_remove(struct platform_device *pdev)
{
hwrng_unregister(&picoxcell_trng);
clk_disable(rng_clk);
return 0;
}
#ifdef CONFIG_PM
static int picoxcell_trng_suspend(struct device *dev)
{
clk_disable(rng_clk);
return 0;
}
static int picoxcell_trng_resume(struct device *dev)
{
return clk_enable(rng_clk);
}
static const struct dev_pm_ops picoxcell_trng_pm_ops = {
.suspend = picoxcell_trng_suspend,
.resume = picoxcell_trng_resume,
};
#endif /* CONFIG_PM */
static struct platform_driver picoxcell_trng_driver = {
.probe = picoxcell_trng_probe,
.remove = picoxcell_trng_remove,
.driver = {
.name = "picoxcell-trng",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &picoxcell_trng_pm_ops,
#endif /* CONFIG_PM */
},
};
module_platform_driver(picoxcell_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
......@@ -120,10 +120,8 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
/* Allocate memory for the device structure (and zero it) */
priv = devm_kzalloc(&pdev->dev,
sizeof(struct timeriomem_rng_private_data), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "failed to allocate device structure.\n");
if (!priv)
return -ENOMEM;
}
platform_set_drvdata(pdev, priv);
......
......@@ -301,14 +301,14 @@ config CRYPTO_DEV_SAHARA
found in some Freescale i.MX chips.
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
depends on ARCH_S5PV210 || ARCH_EXYNOS
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
This option allows you to have support for S5P crypto acceleration.
Select this to offload Samsung S5PV210 or S5PC110 from AES
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
algorithms execution.
config CRYPTO_DEV_NX
......
......@@ -716,6 +716,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return -EINVAL;
}
ctx->block_size = CFB32_BLOCK_SIZE;
} else if (mode & AES_FLAGS_CFB64) {
if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB64 blocks\n");
return -EINVAL;
}
ctx->block_size = CFB64_BLOCK_SIZE;
} else {
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
pr_err("request size is not exact amount of AES blocks\n");
......@@ -1069,7 +1075,7 @@ static struct crypto_alg aes_algs[] = {
.cra_driver_name = "atmel-cfb8-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB64_BLOCK_SIZE,
.cra_blocksize = CFB8_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
.cra_alignmask = 0x0,
.cra_type = &crypto_ablkcipher_type,
......
......@@ -29,10 +29,11 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <asm/blackfin.h>
#include <asm/bfin_crc.h>
#include <asm/dma.h>
#include <asm/portmux.h>
#include <asm/io.h>
#include "bfin_crc.h"
#define CRC_CCRYPTO_QUEUE_LENGTH 5
......@@ -54,12 +55,13 @@ struct bfin_crypto_crc {
int irq;
int dma_ch;
u32 poly;
volatile struct crc_register *regs;
struct crc_register *regs;
struct ahash_request *req; /* current request in operation */
struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
u8 *sg_mid_buf;
dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
struct tasklet_struct done_task;
struct crypto_queue queue; /* waiting requests */
......@@ -132,13 +134,13 @@ static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nent
static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
crc->regs->datacntrld = 0;
crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
crc->regs->curresult = key;
writel(0, &crc->regs->datacntrld);
writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
writel(key, &crc->regs->curresult);
/* setup CRC interrupts */
crc->regs->status = CMPERRI | DCNTEXPI;
crc->regs->intrenset = CMPERRI | DCNTEXPI;
writel(CMPERRI | DCNTEXPI, &crc->regs->status);
writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
return 0;
}
......@@ -194,7 +196,6 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
dma_addr = sg_dma_address(sg);
/* deduce extra bytes in last sg */
if (sg_is_last(sg))
......@@ -207,12 +208,29 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
bytes in current sg buffer. Move addr of current
sg and deduce the length of current sg.
*/
memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count,
(void *)dma_addr,
memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
sg_virt(sg),
CHKSUM_DIGEST_SIZE - mid_dma_count);
dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
DMAEN | PSIZE_32 | WDSIZE_32;
/* setup new dma descriptor for next middle dma */
crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
crc->sg_cpu[i].cfg = dma_config;
crc->sg_cpu[i].x_count = 1;
crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
i, crc->sg_cpu[i].start_addr,
crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
crc->sg_cpu[i].x_modify);
i++;
}
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
/* chop current sg dma len to multiple of 32 bits */
mid_dma_count = dma_count % 4;
dma_count &= ~0x3;
......@@ -243,24 +261,9 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
if (mid_dma_count) {
/* copy extra bytes to next middle dma buffer */
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
DMAEN | PSIZE_32 | WDSIZE_32;
memcpy(crc->sg_mid_buf + (i << 2),
(void *)(dma_addr + (dma_count << 2)),
(u8*)sg_virt(sg) + (dma_count << 2),
mid_dma_count);
/* setup new dma descriptor for next middle dma */
crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
crc->sg_mid_buf + (i << 2),
CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
crc->sg_cpu[i].cfg = dma_config;
crc->sg_cpu[i].x_count = 1;
crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
i, crc->sg_cpu[i].start_addr,
crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
crc->sg_cpu[i].x_modify);
i++;
}
}
......@@ -303,6 +306,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
int nsg, i, j;
unsigned int nextlen;
unsigned long flags;
u32 reg;
spin_lock_irqsave(&crc->lock, flags);
if (req)
......@@ -402,13 +406,14 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
/* set CRC data count before start DMA */
crc->regs->datacnt = ctx->sg_buflen >> 2;
writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
/* setup and enable CRC DMA */
bfin_crypto_crc_config_dma(crc);
/* finally kick off CRC operation */
crc->regs->control |= BLKEN;
reg = readl(&crc->regs->control);
writel(reg | BLKEN, &crc->regs->control);
return -EINPROGRESS;
}
......@@ -529,14 +534,17 @@ static void bfin_crypto_crc_done_task(unsigned long data)
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
struct bfin_crypto_crc *crc = dev_id;
u32 reg;
if (crc->regs->status & DCNTEXP) {
crc->regs->status = DCNTEXP;
if (readl(&crc->regs->status) & DCNTEXP) {
writel(DCNTEXP, &crc->regs->status);
/* prepare results */
put_unaligned_le32(crc->regs->result, crc->req->result);
put_unaligned_le32(readl(&crc->regs->result),
crc->req->result);
crc->regs->control &= ~BLKEN;
reg = readl(&crc->regs->control);
writel(reg & ~BLKEN, &crc->regs->control);
crc->busy = 0;
if (crc->req->base.complete)
......@@ -560,7 +568,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
int i = 100000;
while ((crc->regs->control & BLKEN) && --i)
while ((readl(&crc->regs->control) & BLKEN) && --i)
cpu_relax();
if (i == 0)
......@@ -647,29 +655,32 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
* 1 last + 1 next dma descriptors
*/
crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
* ((CRC_MAX_DMA_DESC + 1) << 1);
crc->regs->control = 0;
crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
writel(0, &crc->regs->control);
crc->poly = (u32)pdev->dev.platform_data;
writel(crc->poly, &crc->regs->poly);
while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
cpu_relax();
if (timeout == 0)
dev_info(&pdev->dev, "init crc poly timeout\n");
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
list_add(&crc->list, &crc_list.dev_list);
spin_unlock(&crc_list.lock);
platform_set_drvdata(pdev, crc);
ret = crypto_register_ahash(&algs);
if (ret) {
spin_lock(&crc_list.lock);
list_del(&crc->list);
spin_unlock(&crc_list.lock);
dev_err(&pdev->dev, "Cann't register crypto ahash device\n");
goto out_error_dma;
if (list_is_singular(&crc_list.dev_list)) {
ret = crypto_register_ahash(&algs);
if (ret) {
dev_err(&pdev->dev,
"Can't register crypto ahash device\n");
goto out_error_dma;
}
}
dev_info(&pdev->dev, "initialized\n");
......
/*
* bfin_crc.h - interface to Blackfin CRC controllers
*
* Copyright 2012 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#ifndef __BFIN_CRC_H__
#define __BFIN_CRC_H__
/* Function driver which use hardware crc must initialize the structure */
struct crc_info {
/* Input data address */
unsigned char *in_addr;
/* Output data address */
unsigned char *out_addr;
/* Input or output bytes */
unsigned long datasize;
union {
/* CRC to compare with that of input buffer */
unsigned long crc_compare;
/* Value to compare with input data */
unsigned long val_verify;
/* Value to fill */
unsigned long val_fill;
};
/* Value to program the 32b CRC Polynomial */
unsigned long crc_poly;
union {
/* CRC calculated from the input data */
unsigned long crc_result;
/* First failed position to verify input data */
unsigned long pos_verify;
};
/* CRC mirror flags */
unsigned int bitmirr:1;
unsigned int bytmirr:1;
unsigned int w16swp:1;
unsigned int fdsel:1;
unsigned int rsltmirr:1;
unsigned int polymirr:1;
unsigned int cmpmirr:1;
};
/* Userspace interface */
#define CRC_IOC_MAGIC 'C'
#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int)
#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int)
#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int)
#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int)
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
struct crc_register {
u32 control;
u32 datacnt;
u32 datacntrld;
u32 __pad_1[2];
u32 compare;
u32 fillval;
u32 datafifo;
u32 intren;
u32 intrenset;
u32 intrenclr;
u32 poly;
u32 __pad_2[4];
u32 status;
u32 datacntcap;
u32 __pad_3;
u32 result;
u32 curresult;
u32 __pad_4[3];
u32 revid;
};
/* CRC_STATUS Masks */
#define CMPERR 0x00000002 /* Compare error */
#define DCNTEXP 0x00000010 /* datacnt register expired */
#define IBR 0x00010000 /* Input buffer ready */
#define OBR 0x00020000 /* Output buffer ready */
#define IRR 0x00040000 /* Immediate result readt */
#define LUTDONE 0x00080000 /* Look-up table generation done */
#define FSTAT 0x00700000 /* FIFO status */
#define MAX_FIFO 4 /* Max fifo size */
/* CRC_CONTROL Masks */
#define BLKEN 0x00000001 /* Block enable */
#define OPMODE 0x000000F0 /* Operation mode */
#define OPMODE_OFFSET 4 /* Operation mode mask offset*/
#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */
#define MODE_DATA_FILL 2 /* MTM data fill */
#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */
#define MODE_DATA_VERIFY 4 /* MSM data verify */
#define AUTOCLRZ 0x00000100 /* Auto clear to zero */
#define AUTOCLRF 0x00000200 /* Auto clear to one */
#define OBRSTALL 0x00001000 /* Stall on output buffer ready */
#define IRRSTALL 0x00002000 /* Stall on immediate result ready */
#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */
#define BITMIRR_OFFSET 16 /* Mirror bits offset */
#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */
#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */
#define W16SWP 0x00040000 /* Mirror uppper and lower 16-bit word of 32-bit input data */
#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */
#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */
#define FDSEL_OFFSET 19 /* Mirror FIFO offset */
#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */
#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */
#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */
#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. */
#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */
#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */
/* CRC_INTREN Masks */
#define CMPERRI 0x02 /* CRC_ERROR_INTR */
#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */
#endif
#endif
......@@ -303,6 +303,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
......@@ -472,6 +473,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
......@@ -527,6 +529,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
ctx->split_key_pad_len + ctx->enckeylen <=
CAAM_DESC_BYTES_MAX)
......@@ -918,11 +921,8 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct aead_edesc *)((char *)desc -
offsetof(struct aead_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
......@@ -969,11 +969,8 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
req->cryptlen - ctx->authsize, 1);
#endif
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
......@@ -1018,11 +1015,8 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ablkcipher_edesc *)((char *)desc -
offsetof(struct ablkcipher_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
......@@ -1053,11 +1047,8 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ablkcipher_edesc *)((char *)desc -
offsetof(struct ablkcipher_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
......
......@@ -545,7 +545,8 @@ static int ahash_setkey(struct crypto_ahash *ahash,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
ret = -ENOMEM;
goto map_err;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
......@@ -559,6 +560,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
DMA_TO_DEVICE);
}
map_err:
kfree(hashed_key);
return ret;
badkey:
......@@ -631,11 +633,8 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
......@@ -669,11 +668,8 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
......@@ -707,11 +703,8 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
......@@ -745,11 +738,8 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
edesc = (struct ahash_edesc *)((char *)desc -
offsetof(struct ahash_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
kfree(edesc);
......
......@@ -103,11 +103,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
bd = (struct buf_data *)((char *)desc -
offsetof(struct buf_data, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(jrdev, err);
atomic_set(&bd->empty, BUF_NOT_EMPTY);
complete(&bd->filled);
......
......@@ -7,5 +7,5 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
extern char *caam_jr_strstatus(char *outstr, u32 status);
void caam_jr_strstatus(struct device *jrdev, u32 status);
#endif /* CAAM_ERROR_H */
......@@ -19,11 +19,8 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
if (err)
caam_jr_strstatus(dev, err);
res->err = err;
......
......@@ -191,12 +191,12 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
pr_warn("could not load fallback driver %s\n",
tfm->__crt_alg->cra_name);
crypto_tfm_alg_name(tfm));
return PTR_ERR(fallback_tfm);
}
ctx->u.aes.tfm_ablkcipher = fallback_tfm;
......
......@@ -48,12 +48,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
msix_entry[v].entry = v;
while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
v = ret;
if (ret)
ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
if (ret < 0)
return ret;
ccp_pci->msix_count = v;
ccp_pci->msix_count = ret;
for (v = 0; v < ccp_pci->msix_count; v++) {
/* Set the interrupt names and request the irqs */
snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
......
......@@ -226,7 +226,7 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
op->dst = (void *) out;
op->mode = AES_MODE_ECB;
op->flags = 0;
op->len = AES_MIN_BLOCK_SIZE;
op->len = AES_BLOCK_SIZE;
op->dir = AES_DIR_ENCRYPT;
geode_aes_crypt(op);
......@@ -247,7 +247,7 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
op->dst = (void *) out;
op->mode = AES_MODE_ECB;
op->flags = 0;
op->len = AES_MIN_BLOCK_SIZE;
op->len = AES_BLOCK_SIZE;
op->dir = AES_DIR_DECRYPT;
geode_aes_crypt(op);
......@@ -255,7 +255,7 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
const char *name = crypto_tfm_alg_name(tfm);
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
op->fallback.cip = crypto_alloc_cipher(name, 0,
......@@ -286,7 +286,7 @@ static struct crypto_alg geode_alg = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_blocksize = AES_MIN_BLOCK_SIZE,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_module = THIS_MODULE,
.cra_u = {
......@@ -320,7 +320,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_DECRYPT;
ret = geode_aes_crypt(op);
......@@ -352,7 +352,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_ENCRYPT;
ret = geode_aes_crypt(op);
......@@ -365,7 +365,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
static int fallback_init_blk(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
const char *name = crypto_tfm_alg_name(tfm);
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
op->fallback.blk = crypto_alloc_blkcipher(name, 0,
......@@ -396,7 +396,7 @@ static struct crypto_alg geode_cbc_alg = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_blocksize = AES_MIN_BLOCK_SIZE,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
......@@ -408,7 +408,7 @@ static struct crypto_alg geode_cbc_alg = {
.setkey = geode_setkey_blk,
.encrypt = geode_cbc_encrypt,
.decrypt = geode_cbc_decrypt,
.ivsize = AES_IV_LENGTH,
.ivsize = AES_BLOCK_SIZE,
}
}
};
......@@ -432,7 +432,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_DECRYPT;
ret = geode_aes_crypt(op);
......@@ -462,7 +462,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_ENCRYPT;
ret = geode_aes_crypt(op);
......@@ -482,7 +482,7 @@ static struct crypto_alg geode_ecb_alg = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_blocksize = AES_MIN_BLOCK_SIZE,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
......@@ -547,7 +547,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (ret)
goto eecb;
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
return 0;
eecb:
......@@ -565,7 +565,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
eenable:
pci_disable_device(dev);
printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
dev_err(&dev->dev, "GEODE AES initialization failed.\n");
return ret;
}
......
......@@ -10,10 +10,6 @@
#define _GEODE_AES_H_
/* driver logic flags */
#define AES_IV_LENGTH 16
#define AES_KEY_LENGTH 16
#define AES_MIN_BLOCK_SIZE 16
#define AES_MODE_ECB 0
#define AES_MODE_CBC 1
......@@ -64,7 +60,7 @@ struct geode_aes_op {
u32 flags;
int len;
u8 key[AES_KEY_LENGTH];
u8 key[AES_KEYSIZE_128];
u8 *iv;
union {
......
......@@ -622,8 +622,8 @@ static int queue_manag(void *data)
}
if (async_req) {
if (async_req->tfm->__crt_alg->cra_type !=
&crypto_ahash_type) {
if (crypto_tfm_alg_type(async_req->tfm) !=
CRYPTO_ALG_TYPE_AHASH) {
struct ablkcipher_request *req =
ablkcipher_request_cast(async_req);
mv_start_new_crypt_req(req);
......@@ -843,7 +843,7 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
enum hash_op op, int count_add)
{
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *fallback_tfm = NULL;
struct crypto_shash *base_hash = NULL;
......
......@@ -104,7 +104,6 @@ struct dcp_sha_req_ctx {
* design of Linux Crypto API.
*/
static struct dcp *global_sdcp;
static DEFINE_MUTEX(global_mutex);
/* DCP register layout. */
#define MXS_DCP_CTRL 0x00
......@@ -482,7 +481,7 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
const char *name = crypto_tfm_alg_name(tfm);
const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *blk;
......@@ -907,60 +906,49 @@ static int mxs_dcp_probe(struct platform_device *pdev)
struct resource *iores;
int dcp_vmi_irq, dcp_irq;
mutex_lock(&global_mutex);
if (global_sdcp) {
dev_err(dev, "Only one DCP instance allowed!\n");
ret = -ENODEV;
goto err_mutex;
return -ENODEV;
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
if (dcp_vmi_irq < 0) {
ret = dcp_vmi_irq;
goto err_mutex;
}
if (dcp_vmi_irq < 0)
return dcp_vmi_irq;
dcp_irq = platform_get_irq(pdev, 1);
if (dcp_irq < 0) {
ret = dcp_irq;
goto err_mutex;
}
if (dcp_irq < 0)
return dcp_irq;
sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
if (!sdcp) {
ret = -ENOMEM;
goto err_mutex;
}
if (!sdcp)
return -ENOMEM;
sdcp->dev = dev;
sdcp->base = devm_ioremap_resource(dev, iores);
if (IS_ERR(sdcp->base)) {
ret = PTR_ERR(sdcp->base);
goto err_mutex;
}
if (IS_ERR(sdcp->base))
return PTR_ERR(sdcp->base);
ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
"dcp-vmi-irq", sdcp);
if (ret) {
dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
goto err_mutex;
return ret;
}
ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
"dcp-irq", sdcp);
if (ret) {
dev_err(dev, "Failed to claim DCP IRQ!\n");
goto err_mutex;
return ret;
}
/* Allocate coherent helper block. */
sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
GFP_KERNEL);
if (!sdcp->coh) {
ret = -ENOMEM;
goto err_mutex;
}
if (!sdcp->coh)
return -ENOMEM;
/* Re-align the structure so it fits the DCP constraints. */
sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
......@@ -968,7 +956,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
/* Restart the DCP block. */
ret = stmp_reset_block(sdcp->base);
if (ret)
goto err_mutex;
return ret;
/* Initialize control register. */
writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
......@@ -1006,8 +994,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
NULL, "mxs_dcp_chan/sha");
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
goto err_mutex;
return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
}
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
......@@ -1064,9 +1051,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
err_mutex:
mutex_unlock(&global_mutex);
return ret;
}
......@@ -1088,9 +1072,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
mutex_lock(&global_mutex);
global_sdcp = NULL;
mutex_unlock(&global_mutex);
return 0;
}
......
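With the global mutex gone, every resource in mxs_dcp_probe() is devm-managed, so the error paths collapse into direct returns. A condensed sketch of that probe pattern, with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct demo_priv { void __iomem *base; };

static irqreturn_t demo_irq_handler(int irq, void *context)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct demo_priv *priv;
	int irq, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* nothing to unwind by hand */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, demo_irq_handler, 0, "demo", priv);
	if (ret)
		return ret;		/* devm releases the irq for us */

	platform_set_drvdata(pdev, priv);
	return 0;
}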
......@@ -356,7 +356,7 @@ static int n2_hash_async_finup(struct ahash_request *req)
static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct crypto_ahash *fallback_tfm;
......@@ -391,7 +391,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
......
......@@ -1229,7 +1229,7 @@ static int __exit nx842_remove(struct vio_dev *viodev)
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
of_reconfig_notifier_unregister(&nx842_of_nb);
rcu_assign_pointer(devdata, NULL);
RCU_INIT_POINTER(devdata, NULL);
spin_unlock_irqrestore(&devdata_mutex, flags);
synchronize_rcu();
dev_set_drvdata(&viodev->dev, NULL);
......@@ -1280,7 +1280,7 @@ static void __exit nx842_exit(void)
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
rcu_assign_pointer(devdata, NULL);
RCU_INIT_POINTER(devdata, NULL);
spin_unlock_irqrestore(&devdata_mutex, flags);
synchronize_rcu();
if (old_devdata)
......
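The nx842 change swaps rcu_assign_pointer(p, NULL) for RCU_INIT_POINTER(p, NULL): the assign variant carries a write barrier so readers never see a pointer to uninitialized data, which is pointless when publishing NULL. A minimal sketch; demo_ptr and demo_data are hypothetical:

#include <linux/rcupdate.h>

struct demo_data { int val; };
static struct demo_data __rcu *demo_ptr;

static void demo_publish(struct demo_data *d)
{
	rcu_assign_pointer(demo_ptr, d);	/* barrier: readers must see d->val */
}

static void demo_retract(void)
{
	RCU_INIT_POINTER(demo_ptr, NULL);	/* nothing to order for NULL */
	synchronize_rcu();			/* wait out existing readers */
}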
......@@ -223,12 +223,19 @@ static void omap_des_write_n(struct omap_des_dev *dd, u32 offset,
static int omap_des_hw_init(struct omap_des_dev *dd)
{
int err;
/*
* Clocks are enabled when a request starts and disabled when it finishes.
* There may be long delays between requests, during which the device
* might go to off mode to save power.
*/
pm_runtime_get_sync(dd->dev);
err = pm_runtime_get_sync(dd->dev);
if (err < 0) {
pm_runtime_put_noidle(dd->dev);
dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
return err;
}
if (!(dd->flags & FLAGS_INIT)) {
dd->flags |= FLAGS_INIT;
......@@ -1074,16 +1081,20 @@ static int omap_des_probe(struct platform_device *pdev)
if (err)
goto err_res;
dd->io_base = devm_request_and_ioremap(dev, res);
if (!dd->io_base) {
dev_err(dev, "can't ioremap\n");
err = -ENOMEM;
dd->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dd->io_base)) {
err = PTR_ERR(dd->io_base);
goto err_res;
}
dd->phys_base = res->start;
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
pm_runtime_put_noidle(dev);
dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
goto err_get;
}
omap_des_dma_stop(dd);
......@@ -1148,6 +1159,7 @@ static int omap_des_probe(struct platform_device *pdev)
err_irq:
tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
err_get:
pm_runtime_disable(dev);
err_res:
dd = NULL;
......@@ -1191,7 +1203,14 @@ static int omap_des_suspend(struct device *dev)
static int omap_des_resume(struct device *dev)
{
pm_runtime_get_sync(dev);
int err;
err = pm_runtime_get_sync(dev);
if (err < 0) {
pm_runtime_put_noidle(dev);
dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
return err;
}
return 0;
}
#endif
......
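The omap-des hunks add the standard pm_runtime_get_sync() error handling: the call increments the device's usage count even when it fails, so the failure path must rebalance it with pm_runtime_put_noidle(). A minimal sketch of that idiom:

#include <linux/pm_runtime.h>

static int demo_runtime_get(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance the usage count */
		dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
		return err;
	}
	return 0;
}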
......@@ -211,7 +211,7 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
static int padlock_cra_init(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *fallback_tfm;
int err = -ENOMEM;
......
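Note why these fallback lookups want cra_name rather than a driver name: they ask the crypto API for any other implementation of the same algorithm (e.g. "sha1"), with the mask excluding implementations that themselves need a fallback. A sketch modeled on the padlock/n2 init paths:

#include <crypto/hash.h>

static struct crypto_shash *demo_alloc_fallback(struct crypto_tfm *tfm)
{
	return crypto_alloc_shash(crypto_tfm_alg_name(tfm), 0,
				  CRYPTO_ALG_NEED_FALLBACK);
}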
......@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
......@@ -29,9 +30,6 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <plat/cpu.h>
#include <mach/dma.h>
#define _SBF(s, v) ((v) << (s))
#define _BIT(b) _SBF(b, 1)
......@@ -105,7 +103,7 @@
#define SSS_REG_FCPKDMAO 0x005C
/* AES registers */
#define SSS_REG_AES_CONTROL 0x4000
#define SSS_REG_AES_CONTROL 0x00
#define SSS_AES_BYTESWAP_DI _BIT(11)
#define SSS_AES_BYTESWAP_DO _BIT(10)
#define SSS_AES_BYTESWAP_IV _BIT(9)
......@@ -121,21 +119,25 @@
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT _BIT(0)
#define SSS_REG_AES_STATUS 0x4004
#define SSS_REG_AES_STATUS 0x04
#define SSS_AES_BUSY _BIT(2)
#define SSS_AES_INPUT_READY _BIT(1)
#define SSS_AES_OUTPUT_READY _BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x4010 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x4020 + (s << 2))
#define SSS_REG_AES_IV_DATA(s) (0x4030 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s) (0x4040 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x4080 + (s << 2))
#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
#define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
SSS_AES_REG(dev, reg))
/* HW engine modes */
#define FLAGS_AES_DECRYPT _BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
......@@ -145,6 +147,20 @@
#define AES_KEY_LEN 16
#define CRYPTO_QUEUE_LEN 1
/**
* struct samsung_aes_variant - platform specific SSS driver data
* @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
* @aes_offset: AES register offset from SSS module's base.
*
* Specifies the platform-specific configuration of the SSS module.
* Note: the driver-specific platform data is kept in a structure to allow
* its usage to be expanded in the future.
*/
struct samsung_aes_variant {
bool has_hash_irq;
unsigned int aes_offset;
};
struct s5p_aes_reqctx {
unsigned long mode;
};
......@@ -161,6 +177,7 @@ struct s5p_aes_dev {
struct device *dev;
struct clk *clk;
void __iomem *ioaddr;
void __iomem *aes_ioaddr;
int irq_hash;
int irq_fc;
......@@ -173,10 +190,48 @@ struct s5p_aes_dev {
struct crypto_queue queue;
bool busy;
spinlock_t lock;
struct samsung_aes_variant *variant;
};
static struct s5p_aes_dev *s5p_dev;
static const struct samsung_aes_variant s5p_aes_data = {
.has_hash_irq = true,
.aes_offset = 0x4000,
};
static const struct samsung_aes_variant exynos_aes_data = {
.has_hash_irq = false,
.aes_offset = 0x200,
};
static const struct of_device_id s5p_sss_dt_match[] = {
{
.compatible = "samsung,s5pv210-secss",
.data = &s5p_aes_data,
},
{
.compatible = "samsung,exynos4210-secss",
.data = &exynos_aes_data,
},
{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
static inline struct samsung_aes_variant *find_s5p_sss_version
(struct platform_device *pdev)
{
if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
const struct of_device_id *match;
match = of_match_node(s5p_sss_dt_match,
pdev->dev.of_node);
return (struct samsung_aes_variant *)match->data;
}
return (struct samsung_aes_variant *)
platform_get_device_id(pdev)->driver_data;
}
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
......@@ -272,8 +327,12 @@ static void s5p_aes_tx(struct s5p_aes_dev *dev)
}
s5p_set_dma_outdata(dev, dev->sg_dst);
} else
} else {
s5p_aes_complete(dev, err);
dev->busy = true;
tasklet_schedule(&dev->tasklet);
}
}
static void s5p_aes_rx(struct s5p_aes_dev *dev)
......@@ -322,14 +381,15 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
{
void __iomem *keystart;
memcpy(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
if (iv)
memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
if (keylen == AES_KEYSIZE_256)
keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
else if (keylen == AES_KEYSIZE_192)
keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
else
keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
memcpy(keystart, key, keylen);
}
......@@ -379,7 +439,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if (err)
goto outdata_error;
SSS_WRITE(dev, AES_CONTROL, aes_control);
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
s5p_set_dma_indata(dev, req->src);
......@@ -410,10 +470,13 @@ static void s5p_tasklet_cb(unsigned long data)
spin_lock_irqsave(&dev->lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
spin_unlock_irqrestore(&dev->lock, flags);
if (!async_req)
if (!async_req) {
dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
return;
}
spin_unlock_irqrestore(&dev->lock, flags);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
......@@ -432,14 +495,13 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
int err;
spin_lock_irqsave(&dev->lock, flags);
err = ablkcipher_enqueue_request(&dev->queue, req);
if (dev->busy) {
err = -EAGAIN;
spin_unlock_irqrestore(&dev->lock, flags);
goto exit;
}
dev->busy = true;
err = ablkcipher_enqueue_request(&dev->queue, req);
spin_unlock_irqrestore(&dev->lock, flags);
tasklet_schedule(&dev->tasklet);
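The reordering above closes a small bug: the request used to be enqueued before the busy check, so a request rejected with -EAGAIN was still left sitting in the queue. A condensed sketch of the fixed flow (demo_handle_req is a stand-in name, simplified from the hunk above):

static int demo_handle_req(struct s5p_aes_dev *dev,
			   struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->busy) {		/* reject before enqueueing */
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EAGAIN;
	}
	dev->busy = true;
	err = ablkcipher_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);
	return err;
}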
......@@ -564,6 +626,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
struct s5p_aes_dev *pdata;
struct device *dev = &pdev->dev;
struct resource *res;
struct samsung_aes_variant *variant;
if (s5p_dev)
return -EEXIST;
......@@ -577,30 +640,25 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
variant = find_s5p_sss_version(pdev);
pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
dev_err(dev, "failed to find secss clock source\n");
return -ENOENT;
}
clk_enable(pdata->clk);
err = clk_prepare_enable(pdata->clk);
if (err < 0) {
dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
return err;
}
spin_lock_init(&pdata->lock);
pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
if (pdata->irq_hash < 0) {
err = pdata->irq_hash;
dev_warn(dev, "hash interrupt is not available.\n");
goto err_irq;
}
err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
IRQF_SHARED, pdev->name, pdev);
if (err < 0) {
dev_warn(dev, "hash interrupt is not available.\n");
goto err_irq;
}
pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
pdata->irq_fc = platform_get_irq(pdev, 0);
if (pdata->irq_fc < 0) {
err = pdata->irq_fc;
dev_warn(dev, "feed control interrupt is not available.\n");
......@@ -613,6 +671,23 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_irq;
}
if (variant->has_hash_irq) {
pdata->irq_hash = platform_get_irq(pdev, 1);
if (pdata->irq_hash < 0) {
err = pdata->irq_hash;
dev_warn(dev, "hash interrupt is not available.\n");
goto err_irq;
}
err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
IRQF_SHARED, pdev->name, pdev);
if (err < 0) {
dev_warn(dev, "hash interrupt is not available.\n");
goto err_irq;
}
}
pdata->busy = false;
pdata->variant = variant;
pdata->dev = dev;
platform_set_drvdata(pdev, pdata);
s5p_dev = pdata;
......@@ -639,7 +714,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
tasklet_kill(&pdata->tasklet);
err_irq:
clk_disable(pdata->clk);
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
......@@ -659,7 +734,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
tasklet_kill(&pdata->tasklet);
clk_disable(pdata->clk);
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
......@@ -672,6 +747,7 @@ static struct platform_driver s5p_aes_crypto = {
.driver = {
.owner = THIS_MODULE,
.name = "s5p-secss",
.of_match_table = s5p_sss_dt_match,
},
};
......
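The clk_enable() to clk_prepare_enable() switch in the s5p-sss probe/remove paths matters on common-clock platforms such as Exynos, where a clock must be prepared before it can be enabled. A minimal sketch of the paired calls:

#include <linux/clk.h>
#include <linux/device.h>

static int demo_clk_on(struct device *dev, struct clk *clk)
{
	int err = clk_prepare_enable(clk);	/* prepare + enable in one call */

	if (err < 0)
		dev_err(dev, "Enabling clock failed, err %d\n", err);
	return err;
}

static void demo_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);		/* disable + unprepare */
}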
......@@ -728,7 +728,7 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = tfm->__crt_alg->cra_name;
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->fallback = crypto_alloc_ablkcipher(name, 0,
......
......@@ -55,15 +55,28 @@ extern const struct crypto_type crypto_ahash_type;
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
int crypto_ahash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
struct crypto_hash_walk *walk,
struct scatterlist *sg, unsigned int len);
static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
int err)
{
return crypto_hash_walk_done(walk, err);
}
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
{
return crypto_hash_walk_last(walk);
}
int crypto_register_ahash(struct ahash_alg *alg);
int crypto_unregister_ahash(struct ahash_alg *alg);
int ahash_register_instance(struct crypto_template *tmpl,
......
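A sketch of how a driver might consume the new asynchronous walk: crypto_ahash_walk_first() maps the first scatterlist chunk and returns its length, each crypto_ahash_walk_done() call finishes that chunk and maps the next, and the loop ends when no data remains. demo_process() is a hypothetical per-chunk hash routine:

#include <crypto/internal/hash.h>

static void demo_process(const void *data, unsigned int len);

static int demo_ahash_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_ahash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_ahash_walk_done(&walk, 0))
		demo_process(walk.data, nbytes);	/* one mapped chunk */

	return nbytes;		/* 0 on success, negative on error */
}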