Commit 3b23e665 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (50 commits)
  crypto: ixp4xx - Select CRYPTO_AUTHENC
  crypto: s390 - Respect STFL bit
  crypto: talitos - Add support for sha256 and md5 variants
  crypto: hash - Move ahash functions into crypto/hash.h
  crypto: crc32c - Add ahash implementation
  crypto: hash - Added scatter list walking helper
  crypto: prng - Deterministic CPRNG
  crypto: hash - Removed vestigial ahash fields
  crypto: hash - Fixed digest size check
  crypto: rmd - sparse annotations
  crypto: rmd128 - sparse annotations
  crypto: camellia - Use kernel-provided bitops, unaligned access helpers
  crypto: talitos - Use proper form for algorithm driver names
  crypto: talitos - Add support for 3des
  crypto: padlock - Make module loading quieter when hardware isn't available
  crypto: tcrypt - Remove unnecessary kmap/kunmap calls
  crypto: ixp4xx - Hardware crypto support for IXP4xx CPUs
  crypto: talitos - Freescale integrated security engine (SEC) driver
  [CRYPTO] tcrypt: Add self test for des3_ede cipher operating in cbc mode
  [CRYPTO] rmd: Use pointer form of endian swapping operations
  ...
parents 6c118e43 090657e4
......@@ -296,6 +296,10 @@ static inline int crypt_s390_func_available(int func)
unsigned char status[16];
int ret;
/* check if CPACF facility (bit 17) is available */
if (!(stfl() & 1ULL << (31 - 17)))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
case CRYPT_S390_KM:
ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
......
......@@ -65,6 +65,7 @@ config CRYPTO_NULL
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
select CRYPTO_BLKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
help
This is a generic software asynchronous crypto daemon that
......@@ -212,7 +213,7 @@ comment "Digest"
config CRYPTO_CRC32C
tristate "CRC32c CRC algorithm"
select CRYPTO_ALGAPI
select CRYPTO_HASH
select LIBCRC32C
help
Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
......@@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
should not be used for other purposes because of the weakness
of the algorithm.
config CRYPTO_RMD128
tristate "RIPEMD-128 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-128 (ISO/IEC 10118-3:2004).
RIPEMD-128 is a 128-bit cryptographic hash function. It should only
be used as a secure replacement for RIPEMD. For other use cases,
RIPEMD-160 should be used.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-160 (ISO/IEC 10118-3:2004).
RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
to be used as a secure replacement for the 128-bit hash functions
MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128).
Its speed is comparable to SHA-1 and there are no known attacks against
RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD256
tristate "RIPEMD-256 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-256 is an optional extension of RIPEMD-128 with a 256-bit hash.
It is intended for applications that require a longer hash result
without needing a larger security level than RIPEMD-128.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD320
tristate "RIPEMD-320 digest algorithm"
select CRYPTO_ALGAPI
help
RIPEMD-320 is an optional extension of RIPEMD-160 with a 320-bit hash.
It is intended for applications that require a longer hash result
without needing a larger security level than RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
select CRYPTO_ALGAPI
......@@ -614,6 +666,15 @@ config CRYPTO_LZO
help
This is the LZO algorithm.
comment "Random Number Generation"
config CRYPTO_PRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
help
This option enables the generic pseudo random number generator
for cryptographic modules. It uses the algorithm specified in
ANSI X9.31 A.2.4.
source "drivers/crypto/Kconfig"
endif # if CRYPTO
......@@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
......@@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
......@@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
#
......
/*
* Asynchronous Cryptographic Hash operations.
*
* This is the asynchronous version of hash.c with notification of
* completion via a callback.
*
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
if (offset & alignmask)
nbytes = alignmask + 1 - (offset & alignmask);
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->pg = sg_page(sg);
walk->offset = sg->offset;
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
walk->offset += alignmask - 1;
walk->offset = ALIGN(walk->offset, alignmask + 1);
walk->data += walk->offset;
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
return nbytes;
}
crypto_kunmap(walk->data, 0);
crypto_yield(walk->flags);
if (err)
return err;
walk->offset = 0;
if (nbytes)
return hash_walk_next(walk);
if (!walk->total)
return 0;
walk->sg = scatterwalk_sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total)
return 0;
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
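/*
 * Typical use of the two walk helpers above, as a rough sketch (the
 * crc32c ahash update added later in this series does exactly this):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		crc = crc32c(crc, walk.data, nbytes);
 */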
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = ahash->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
return ahash->setkey(tfm, key, keylen);
}
static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
return alg->cra_ctxsize;
}
static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
struct ahash_tfm *crt = &tfm->crt_ahash;
if (alg->digestsize > PAGE_SIZE / 8)
return -EINVAL;
crt->init = alg->init;
crt->update = alg->update;
crt->final = alg->final;
crt->digest = alg->digest;
crt->setkey = ahash_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
}
const struct crypto_type crypto_ahash_type = {
.ctxsize = crypto_ahash_ctxsize,
.init = crypto_init_ahash_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
......@@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return crypto_init_cipher_ops(tfm);
case CRYPTO_ALG_TYPE_DIGEST:
return crypto_init_digest_ops(tfm);
if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
CRYPTO_ALG_TYPE_HASH_MASK)
return crypto_init_digest_ops_async(tfm);
else
return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);
......
......@@ -35,6 +35,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>
static const u32 camellia_sp1110[256] = {
0x70707000,0x82828200,0x2c2c2c00,0xececec00,
......@@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = {
/*
* macros
*/
#define GETU32(v, pt) \
do { \
/* latest breed of gcc is clever enough to use move */ \
memcpy(&(v), (pt), 4); \
(v) = be32_to_cpu(v); \
} while(0)
/* rotation right shift 1byte */
#define ROR8(x) (((x) >> 8) + ((x) << 24))
/* rotation left shift 1bit */
#define ROL1(x) (((x) << 1) + ((x) >> 31))
/* rotation left shift 1byte */
#define ROL8(x) (((x) << 8) + ((x) >> 24))
#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
do { \
w0 = ll; \
......@@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = {
^ camellia_sp3033[(u8)(il >> 8)] \
^ camellia_sp4404[(u8)(il )]; \
yl ^= yr; \
yr = ROR8(yr); \
yr = ror32(yr, 8); \
yr ^= yl; \
} while(0)
......@@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[7] ^= subL[1]; subR[7] ^= subR[1];
subL[1] ^= subR[1] & ~subR[9];
dw = subL[1] & subL[9],
subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
/* round 8 */
subL[11] ^= subL[1]; subR[11] ^= subR[1];
/* round 10 */
......@@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[15] ^= subL[1]; subR[15] ^= subR[1];
subL[1] ^= subR[1] & ~subR[17];
dw = subL[1] & subL[17],
subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
/* round 14 */
subL[19] ^= subL[1]; subR[19] ^= subR[1];
/* round 16 */
......@@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
} else {
subL[1] ^= subR[1] & ~subR[25];
dw = subL[1] & subL[25],
subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
/* round 20 */
subL[27] ^= subL[1]; subR[27] ^= subR[1];
/* round 22 */
......@@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[26] ^= kw4l; subR[26] ^= kw4r;
kw4l ^= kw4r & ~subR[24];
dw = kw4l & subL[24],
kw4r ^= ROL1(dw); /* modified for FL(kl5) */
kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
}
/* round 17 */
subL[22] ^= kw4l; subR[22] ^= kw4r;
......@@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[18] ^= kw4l; subR[18] ^= kw4r;
kw4l ^= kw4r & ~subR[16];
dw = kw4l & subL[16],
kw4r ^= ROL1(dw); /* modified for FL(kl3) */
kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
/* round 11 */
subL[14] ^= kw4l; subR[14] ^= kw4r;
/* round 9 */
......@@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
subL[10] ^= kw4l; subR[10] ^= kw4r;
kw4l ^= kw4r & ~subR[8];
dw = kw4l & subL[8],
kw4r ^= ROL1(dw); /* modified for FL(kl1) */
kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
/* round 5 */
subL[6] ^= kw4l; subR[6] ^= kw4r;
/* round 3 */
......@@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(6) = subR[5] ^ subR[7];
tl = subL[10] ^ (subR[10] & ~subR[8]);
dw = tl & subL[8], /* FL(kl1) */
tr = subR[10] ^ ROL1(dw);
tr = subR[10] ^ rol32(dw, 1);
SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
SUBKEY_R(7) = subR[6] ^ tr;
SUBKEY_L(8) = subL[8]; /* FL(kl1) */
......@@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(9) = subR[9];
tl = subL[7] ^ (subR[7] & ~subR[9]);
dw = tl & subL[9], /* FLinv(kl2) */
tr = subR[7] ^ ROL1(dw);
tr = subR[7] ^ rol32(dw, 1);
SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
SUBKEY_R(10) = tr ^ subR[11];
SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
......@@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(14) = subR[13] ^ subR[15];
tl = subL[18] ^ (subR[18] & ~subR[16]);
dw = tl & subL[16], /* FL(kl3) */
tr = subR[18] ^ ROL1(dw);
tr = subR[18] ^ rol32(dw, 1);
SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
SUBKEY_R(15) = subR[14] ^ tr;
SUBKEY_L(16) = subL[16]; /* FL(kl3) */
......@@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(17) = subR[17];
tl = subL[15] ^ (subR[15] & ~subR[17]);
dw = tl & subL[17], /* FLinv(kl4) */
tr = subR[15] ^ ROL1(dw);
tr = subR[15] ^ rol32(dw, 1);
SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
SUBKEY_R(18) = tr ^ subR[19];
SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
......@@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
} else {
tl = subL[26] ^ (subR[26] & ~subR[24]);
dw = tl & subL[24], /* FL(kl5) */
tr = subR[26] ^ ROL1(dw);
tr = subR[26] ^ rol32(dw, 1);
SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
SUBKEY_R(23) = subR[22] ^ tr;
SUBKEY_L(24) = subL[24]; /* FL(kl5) */
......@@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
SUBKEY_R(25) = subR[25];
tl = subL[23] ^ (subR[23] & ~subR[25]);
dw = tl & subL[25], /* FLinv(kl6) */
tr = subR[23] ^ ROL1(dw);
tr = subR[23] ^ rol32(dw, 1);
SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
SUBKEY_R(26) = tr ^ subR[27];
SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
......@@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
/* apply the inverse of the last half of P-function */
i = 2;
do {
dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
i += 8;
} while (i < max);
......@@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
/**
* k == kll || klr || krl || krr (|| is concatenation)
*/
GETU32(kll, key );
GETU32(klr, key + 4);
GETU32(krl, key + 8);
GETU32(krr, key + 12);
kll = get_unaligned_be32(key);
klr = get_unaligned_be32(key + 4);
krl = get_unaligned_be32(key + 8);
krr = get_unaligned_be32(key + 12);
/* generate KL dependent subkeys */
/* kw1 */
......@@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
* key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
* (|| is concatenation)
*/
GETU32(kll, key );
GETU32(klr, key + 4);
GETU32(krl, key + 8);
GETU32(krr, key + 12);
GETU32(krll, key + 16);
GETU32(krlr, key + 20);
GETU32(krrl, key + 24);
GETU32(krrr, key + 28);
kll = get_unaligned_be32(key);
klr = get_unaligned_be32(key + 4);
krl = get_unaligned_be32(key + 8);
krr = get_unaligned_be32(key + 12);
krll = get_unaligned_be32(key + 16);
krlr = get_unaligned_be32(key + 20);
krrl = get_unaligned_be32(key + 24);
krrr = get_unaligned_be32(key + 28);
/* generate KL dependent subkeys */
/* kw1 */
......@@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
t0 &= ll; \
t2 |= rr; \
rl ^= t2; \
lr ^= ROL1(t0); \
lr ^= rol32(t0, 1); \
t3 = krl; \
t1 = klr; \
t3 &= rl; \
t1 |= lr; \
ll ^= t1; \
rr ^= ROL1(t3); \
rr ^= rol32(t3, 1); \
} while(0)
#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
......@@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
il ^= kl; \
ir ^= il ^ kr; \
yl ^= ir; \
yr ^= ROR8(il) ^ ir; \
yr ^= ror32(il, 8) ^ ir; \
} while(0)
/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
......
......@@ -5,20 +5,23 @@
*
* This module file is a wrapper to invoke the lib/crc32c routines.
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/crc32c.h>
#include <linux/kernel.h>
#define CHKSUM_BLOCK_SIZE 32
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
......@@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
*(__le32 *)out = ~cpu_to_le32(mctx->crc);
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
static int crc32c_cra_init_old(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
......@@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
return 0;
}
static struct crypto_alg alg = {
static struct crypto_alg old_alg = {
.cra_name = "crc32c",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = crc32c_cra_init,
.cra_list = LIST_HEAD_INIT(old_alg.cra_list),
.cra_init = crc32c_cra_init_old,
.cra_u = {
.digest = {
.dia_digestsize= CHKSUM_DIGEST_SIZE,
......@@ -98,14 +101,125 @@ static struct crypto_alg alg = {
}
};
/*
* Setting the seed allows arbitrary accumulators and flexible XOR policy
* If your algorithm starts with ~0, then XOR with ~0 before you set
* the seed.
*/
static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_ahash_ctx(hash);
if (keylen != sizeof(u32)) {
crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
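/*
 * Seed usage, as a sketch of the policy described above: crc32c starts
 * its accumulator at ~0 and inverts the result in the final step, so to
 * resume from an already finalised value "crc" the caller undoes that
 * inversion before seeding (crypto_ahash_setkey() is assumed to be the
 * generic ahash setkey wrapper from crypto/hash.h):
 *
 *	__le32 seed = cpu_to_le32(~crc);
 *
 *	err = crypto_ahash_setkey(tfm, (u8 *)&seed, sizeof(seed));
 */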
static int crc32c_init(struct ahash_request *req)
{
u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
u32 *crcp = ahash_request_ctx(req);
*crcp = *mctx;
return 0;
}
static int crc32c_update(struct ahash_request *req)
{
struct crypto_hash_walk walk;
u32 *crcp = ahash_request_ctx(req);
u32 crc = *crcp;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
nbytes = crypto_hash_walk_done(&walk, 0))
crc = crc32c(crc, walk.data, nbytes);
*crcp = crc;
return 0;
}
static int crc32c_final(struct ahash_request *req)
{
u32 *crcp = ahash_request_ctx(req);
*(__le32 *)req->result = ~cpu_to_le32p(crcp);
return 0;
}
static int crc32c_digest(struct ahash_request *req)
{
struct crypto_hash_walk walk;
u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
u32 crc = *mctx;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
nbytes = crypto_hash_walk_done(&walk, 0))
crc = crc32c(crc, walk.data, nbytes);
*(__le32 *)req->result = ~cpu_to_le32(crc);
return 0;
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = ~0;
tfm->crt_ahash.reqsize = sizeof(u32);
return 0;
}
static struct crypto_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = crc32c_cra_init,
.cra_type = &crypto_ahash_type,
.cra_u = {
.ahash = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = crc32c_setkey,
.init = crc32c_init,
.update = crc32c_update,
.final = crc32c_final,
.digest = crc32c_digest,
}
}
};
static int __init crc32c_mod_init(void)
{
return crypto_register_alg(&alg);
int err;
err = crypto_register_alg(&old_alg);
if (err)
return err;
err = crypto_register_alg(&alg);
if (err)
crypto_unregister_alg(&old_alg);
return err;
}
static void __exit crc32c_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_alg(&old_alg);
}
module_init(crc32c_mod_init);
......
......@@ -11,6 +11,7 @@
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
......@@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
crypto_completion_t complete;
};
struct cryptd_hash_ctx {
struct crypto_hash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
};
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
......@@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
rctx = ablkcipher_request_ctx(req);
if (unlikely(err == -EINPROGRESS)) {
rctx->complete(&req->base, err);
return;
}
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.info = req->info;
......@@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
req->base.complete = rctx->complete;
out:
local_bh_disable();
req->base.complete(&req->base, err);
rctx->complete(&req->base, err);
local_bh_enable();
}
......@@ -261,6 +268,240 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
return inst;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_hash *cipher;
cipher = crypto_spawn_hash(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
tfm->crt_ahash.reqsize =
sizeof(struct cryptd_hash_request_ctx);
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_state *state = cryptd_get_state(tfm);
int active;
mutex_lock(&state->mutex);
active = ahash_tfm_in_queue(&state->queue,
__crypto_ahash_cast(tfm));
mutex_unlock(&state->mutex);
BUG_ON(active);
crypto_free_hash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
struct crypto_hash *child = ctx->child;
int err;
crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_hash_setkey(child, key, keylen);
crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int cryptd_hash_enqueue(struct ahash_request *req,
crypto_completion_t complete)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_state *state =
cryptd_get_state(crypto_ahash_tfm(tfm));
int err;
rctx->complete = req->base.complete;
req->base.complete = complete;
spin_lock_bh(&state->lock);
err = ahash_enqueue_request(&state->queue, req);
spin_unlock_bh(&state->lock);
wake_up_process(state->task);
return err;
}
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->init(&desc);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_init);
}
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->update(&desc,
req->src,
req->nbytes);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_update);
}
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->final(&desc, req->result);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
desc.tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->digest(&desc,
req->src,
req->nbytes,
req->result);
req->base.complete = rctx->complete;
out:
local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
static struct crypto_instance *cryptd_alloc_hash(
struct rtattr **tb, struct cryptd_state *state)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = cryptd_alloc_instance(alg, state);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ahash_type;
inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.cra_init = cryptd_hash_init_tfm;
inst->alg.cra_exit = cryptd_hash_exit_tfm;
inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static struct cryptd_state state;
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
......@@ -274,6 +515,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
return cryptd_alloc_blkcipher(tb, &state);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_alloc_hash(tb, &state);
}
return ERR_PTR(-EINVAL);
......
......@@ -12,6 +12,7 @@
*
*/
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/mm.h>
#include <linux/errno.h>
......@@ -141,7 +142,7 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
struct hash_tfm *ops = &tfm->crt_hash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
if (dalg->dia_digestsize > PAGE_SIZE / 8)
return -EINVAL;
ops->init = init;
......@@ -157,3 +158,83 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm)
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
}
static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
return -ENOSYS;
}
static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
return dalg->dia_setkey(tfm, key, keylen);
}
static int digest_async_init(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
dalg->dia_init(tfm);
return 0;
}
static int digest_async_update(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
update(&desc, req->src, req->nbytes);
return 0;
}
static int digest_async_final(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
final(&desc, req->result);
return 0;
}
static int digest_async_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return digest(&desc, req->src, req->nbytes, req->result);
}
int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
{
struct ahash_tfm *crt = &tfm->crt_ahash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
crt->init = digest_async_init;
crt->update = digest_async_update;
crt->final = digest_async_final;
crt->digest = digest_async_digest;
crt->setkey = dalg->dia_setkey ? digest_async_setkey :
digest_async_nosetkey;
crt->digestsize = dalg->dia_digestsize;
return 0;
}
......@@ -9,6 +9,7 @@
* any later version.
*/
#include <crypto/internal/hash.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
......@@ -59,24 +60,107 @@ static int hash_setkey(struct crypto_hash *crt, const u8 *key,
return alg->setkey(crt, key, keylen);
}
static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
return alg->setkey(tfm_hash, key, keylen);
}
static int hash_async_init(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->init(&desc);
}
static int hash_async_update(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->update(&desc, req->src, req->nbytes);
}
static int hash_async_final(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->final(&desc, req->result);
}
static int hash_async_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
struct hash_desc desc = {
.tfm = __crypto_hash_cast(tfm),
.flags = req->base.flags,
};
return alg->digest(&desc, req->src, req->nbytes, req->result);
}
static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
{
struct ahash_tfm *crt = &tfm->crt_ahash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
crt->init = hash_async_init;
crt->update = hash_async_update;
crt->final = hash_async_final;
crt->digest = hash_async_digest;
crt->setkey = hash_async_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
{
struct hash_tfm *crt = &tfm->crt_hash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
crt->init = alg->init;
crt->update = alg->update;
crt->final = alg->final;
crt->digest = alg->digest;
crt->setkey = hash_setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
if (alg->digestsize > PAGE_SIZE / 8)
return -EINVAL;
if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
return crypto_init_hash_ops_async(tfm);
else
return crypto_init_hash_ops_sync(tfm);
}
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
......
......@@ -226,6 +226,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
int ds;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
if (err)
......@@ -236,6 +237,13 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
alg->cra_digest.dia_digestsize;
if (ds > alg->cra_blocksize)
goto out_put_alg;
inst = crypto_alloc_instance("hmac", alg);
if (IS_ERR(inst))
goto out_put_alg;
......@@ -246,14 +254,10 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_hash_type;
inst->alg.cra_hash.digestsize =
(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
alg->cra_digest.dia_digestsize;
inst->alg.cra_hash.digestsize = ds;
inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
ALIGN(inst->alg.cra_blocksize * 2 +
inst->alg.cra_hash.digestsize,
ALIGN(inst->alg.cra_blocksize * 2 + ds,
sizeof(void *));
inst->alg.cra_init = hmac_init_tfm;
......
......@@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
......
/*
* PRNG: Pseudo Random Number Generator
* Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
* AES 128 cipher in RFC3686 ctr mode
*
* (C) Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include "prng.h"
#define TEST_PRNG_ON_START 0
#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
#define DEFAULT_PRNG_KSZ 20
#define DEFAULT_PRNG_IV "defaultv"
#define DEFAULT_PRNG_IVSZ 8
#define DEFAULT_BLK_SZ 16
#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
/*
* Flags for the prng_context flags field
*/
#define PRNG_FIXED_SIZE 0x1
#define PRNG_NEED_RESET 0x2
/*
* Note: DT is our counter value
* I is our intermediate value
* V is our seed vector
* See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
* for implementation details
*/
struct prng_context {
char *prng_key;
char *prng_iv;
spinlock_t prng_lock;
unsigned char rand_data[DEFAULT_BLK_SZ];
unsigned char last_rand_data[DEFAULT_BLK_SZ];
unsigned char DT[DEFAULT_BLK_SZ];
unsigned char I[DEFAULT_BLK_SZ];
unsigned char V[DEFAULT_BLK_SZ];
u32 rand_data_valid;
struct crypto_blkcipher *tfm;
u32 flags;
};
static int dbg;
static void hexdump(char *note, unsigned char *buf, unsigned int len)
{
if (dbg) {
printk(KERN_CRIT "%s", note);
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
16, 1,
buf, len, false);
}
}
#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0)
static void xor_vectors(unsigned char *in1, unsigned char *in2,
unsigned char *out, unsigned int size)
{
int i;
for (i=0;i<size;i++)
out[i] = in1[i] ^ in2[i];
}
/*
* Returns DEFAULT_BLK_SZ bytes of random data per call
* returns 0 if generation succeeded, <0 if something went wrong
*/
static int _get_more_prng_bytes(struct prng_context *ctx)
{
int i;
struct blkcipher_desc desc;
struct scatterlist sg_in, sg_out;
int ret;
unsigned char tmp[DEFAULT_BLK_SZ];
desc.tfm = ctx->tfm;
desc.flags = 0;
dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
/*
* This algorithm is a 3 stage state machine
*/
for (i=0;i<3;i++) {
desc.tfm = ctx->tfm;
desc.flags = 0;
switch (i) {
case 0:
/*
* Start by encrypting the counter value
* This gives us an intermediate value I
*/
memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
break;
case 1:
/*
* Next xor I with our secret vector V
* encrypt that result to obtain our
* pseudo random data which we output
*/
xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
break;
case 2:
/*
* First check that we didn't produce the same random data
* that we did last time around through this
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
printk(KERN_ERR "ctx %p Failed repetition check!\n",
ctx);
ctx->flags |= PRNG_NEED_RESET;
return -1;
}
memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
/*
* Lastly xor the random data with I
* and encrypt that to obtain a new secret vector V
*/
xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
break;
}
/* Initialize our input buffer */
sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
/* do the encryption */
ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
/* And check the result */
if (ret) {
dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx);
ctx->rand_data_valid = DEFAULT_BLK_SZ;
return -1;
}
}
/*
* Now update our DT value
*/
for (i=DEFAULT_BLK_SZ-1;i>0;i--) {
ctx->DT[i] = ctx->DT[i-1];
}
ctx->DT[0] += 1;
dbgprint("Returning new block for context %p\n",ctx);
ctx->rand_data_valid = 0;
hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
return 0;
}
/* Our exported functions */
int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
{
unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
if (nbytes < 0)
return -EINVAL;
spin_lock_irqsave(&ctx->prng_lock, flags);
err = -EFAULT;
if (ctx->flags & PRNG_NEED_RESET)
goto done;
/*
* If the FIXED_SIZE flag is on, only return whole blocks of
* pseudo random data
*/
err = -EINVAL;
if (ctx->flags & PRNG_FIXED_SIZE) {
if (nbytes < DEFAULT_BLK_SZ)
goto done;
byte_count = DEFAULT_BLK_SZ;
}
err = byte_count;
dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx);
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx) < 0) {
memset(buf, 0, nbytes);
err = -EFAULT;
goto done;
}
}
/*
* Copy up to the next whole block size
*/
if (byte_count < DEFAULT_BLK_SZ) {
for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
if (byte_count == 0)
goto done;
}
}
/*
* Now copy whole blocks
*/
for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx) < 0) {
memset(buf, 0, nbytes);
err = -1;
goto done;
}
memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
ctx->rand_data_valid += DEFAULT_BLK_SZ;
ptr += DEFAULT_BLK_SZ;
}
/*
* Now copy any extra partial data
*/
if (byte_count)
goto remainder;
done:
spin_unlock_irqrestore(&ctx->prng_lock, flags);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx);
return err;
}
EXPORT_SYMBOL_GPL(get_prng_bytes);
struct prng_context *alloc_prng_context(void)
{
struct prng_context *ctx = kzalloc(sizeof(struct prng_context), GFP_KERNEL);
if (!ctx)
return NULL;
spin_lock_init(&ctx->prng_lock);
if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
kfree(ctx);
ctx = NULL;
}
dbgprint(KERN_CRIT "returning context %p\n",ctx);
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_prng_context);
void free_prng_context(struct prng_context *ctx)
{
crypto_free_blkcipher(ctx->tfm);
kfree(ctx);
}
EXPORT_SYMBOL_GPL(free_prng_context);
int reset_prng_context(struct prng_context *ctx,
unsigned char *key, unsigned char *iv,
unsigned char *V, unsigned char *DT)
{
int ret;
int iv_len;
int rc = -EFAULT;
spin_lock(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
if (key)
memcpy(ctx->prng_key,key,strlen(ctx->prng_key));
else
ctx->prng_key = DEFAULT_PRNG_KEY;
if (iv)
memcpy(ctx->prng_iv,iv, strlen(ctx->prng_iv));
else
ctx->prng_iv = DEFAULT_PRNG_IV;
if (V)
memcpy(ctx->V,V,DEFAULT_BLK_SZ);
else
memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ);
if (DT)
memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
else
memset(ctx->DT, 0, DEFAULT_BLK_SZ);
memset(ctx->rand_data,0,DEFAULT_BLK_SZ);
memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ);
if (ctx->tfm)
crypto_free_blkcipher(ctx->tfm);
ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))",0,0);
if (!ctx->tfm) {
dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n",ctx->tfm);
goto out;
}
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_blkcipher_get_flags(ctx->tfm));
crypto_free_blkcipher(ctx->tfm);
goto out;
}
iv_len = crypto_blkcipher_ivsize(ctx->tfm);
if (iv_len) {
crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
}
rc = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
spin_unlock(&ctx->prng_lock);
return rc;
}
EXPORT_SYMBOL_GPL(reset_prng_context);
/* Module initialization */
static int __init prng_mod_init(void)
{
#ifdef TEST_PRNG_ON_START
int i;
unsigned char tmpbuf[DEFAULT_BLK_SZ];
struct prng_context *ctx = alloc_prng_context();
if (ctx == NULL)
return -EFAULT;
for (i=0;i<16;i++) {
if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
free_prng_context(ctx);
return -EFAULT;
}
}
free_prng_context(ctx);
#endif
return 0;
}
static void __exit prng_mod_fini(void)
{
return;
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
/*
* PRNG: Pseudo Random Number Generator
*
* (C) Neil Horman <nhorman@tuxdriver.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
*/
#ifndef _PRNG_H_
#define _PRNG_H_
struct prng_context;
int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
struct prng_context *alloc_prng_context(void);
int reset_prng_context(struct prng_context *ctx,
unsigned char *key, unsigned char *iv,
unsigned char *V,
unsigned char *DT);
void free_prng_context(struct prng_context *ctx);
#endif
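/*
 * Minimal usage sketch for the interface declared above (error handling
 * trimmed; 16 is DEFAULT_BLK_SZ from prng.c):
 *
 *	struct prng_context *ctx = alloc_prng_context();
 *	unsigned char buf[16];
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (get_prng_bytes(buf, sizeof(buf), ctx) < 0)
 *		printk(KERN_ERR "prng: generation failed\n");
 *	free_prng_context(ctx);
 */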
/*
* Common values for RIPEMD algorithms
*/
#ifndef _CRYPTO_RMD_H
#define _CRYPTO_RMD_H
#define RMD128_DIGEST_SIZE 16
#define RMD128_BLOCK_SIZE 64
#define RMD160_DIGEST_SIZE 20
#define RMD160_BLOCK_SIZE 64
#define RMD256_DIGEST_SIZE 32
#define RMD256_BLOCK_SIZE 64
#define RMD320_DIGEST_SIZE 40
#define RMD320_BLOCK_SIZE 64
/* initial values */
#define RMD_H0 0x67452301UL
#define RMD_H1 0xefcdab89UL
#define RMD_H2 0x98badcfeUL
#define RMD_H3 0x10325476UL
#define RMD_H4 0xc3d2e1f0UL
#define RMD_H5 0x76543210UL
#define RMD_H6 0xfedcba98UL
#define RMD_H7 0x89abcdefUL
#define RMD_H8 0x01234567UL
#define RMD_H9 0x3c2d1e0fUL
/* constants */
#define RMD_K1 0x00000000UL
#define RMD_K2 0x5a827999UL
#define RMD_K3 0x6ed9eba1UL
#define RMD_K4 0x8f1bbcdcUL
#define RMD_K5 0xa953fd4eUL
#define RMD_K6 0x50a28be6UL
#define RMD_K7 0x5c4dd124UL
#define RMD_K8 0x6d703ef3UL
#define RMD_K9 0x7a6d76e9UL
#endif
/*
* Cryptographic API.
*
* RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd128_ctx {
u64 byte_count;
u32 state[4];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define ROUND(a, b, c, d, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)); \
}
static void rmd128_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
/* Initialize right lane */
aaa = state[0];
bbb = state[1];
ccc = state[2];
ddd = state[3];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* round 2: right lane */
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* round 3: right lane */
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* round 4: right lane */
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* combine results */
ddd += cc + state[1]; /* final result for state[0] */
state[1] = state[2] + dd + aaa;
state[2] = state[3] + aa + bbb;
state[3] = state[0] + bb + ccc;
state[0] = ddd;
return;
}
static void rmd128_init(struct crypto_tfm *tfm)
{
struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd128_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd128_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
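/*
 * Example: a 3-byte message has index 3, so padlen = 53 and the
 * padded data (3 + 53 + 8 length bytes) fills exactly one 64-byte
 * block; a 60-byte message has index 60, so padlen = 60 and the
 * appended length spills into a second block.
 */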
rmd128_update(tfm, padding, padlen);
/* Append length */
rmd128_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 4; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd128",
.cra_driver_name = "rmd128",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD128_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd128_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD128_DIGEST_SIZE,
.dia_init = rmd128_init,
.dia_update = rmd128_update,
.dia_final = rmd128_final } }
};
static int __init rmd128_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd128_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd128_mod_init);
module_exit(rmd128_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
MODULE_ALIAS("rmd128");
/*
* Cryptographic API.
*
* RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define K5 RMD_K5
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K9
#define KK5 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define F5(x, y, z) (x ^ (y | ~z))
#define ROUND(a, b, c, d, e, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)) + (e); \
(c) = rol32((c), 10); \
}
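/*
 * One RIPEMD-160 step: mix b, c, d through the round function f, add
 * the message word x and round constant k into a, rotate a left by s
 * and add the fifth chaining word e; c is then rotated left by 10
 * before the next step reuses it.
 */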
static void rmd160_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
ee = state[4];
/* Initialize right lane */
aaa = state[0];
bbb = state[1];
ccc = state[2];
ddd = state[3];
eee = state[4];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
/* round 2: left lane */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
/* round 3: left lane */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
/* round 4: left lane */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
/* round 5: left lane */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* round 2: right lane */
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* round 3: right lane */
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* round 4: right lane */
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* round 5: right lane */
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* combine results */
ddd += cc + state[1]; /* final result for state[0] */
state[1] = state[2] + dd + eee;
state[2] = state[3] + ee + aaa;
state[3] = state[4] + aa + bbb;
state[4] = state[0] + bb + ccc;
state[0] = ddd;
return;
}
static void rmd160_init(struct crypto_tfm *tfm)
{
struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd160_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd160_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd160_update(tfm, padding, padlen);
/* Append length */
rmd160_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd160_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD160_DIGEST_SIZE,
.dia_init = rmd160_init,
.dia_update = rmd160_update,
.dia_final = rmd160_final } }
};
static int __init rmd160_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd160_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
MODULE_ALIAS("rmd160");
/*
* Cryptographic API.
*
* RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd256_ctx {
u64 byte_count;
u32 state[8];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define ROUND(a, b, c, d, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)); \
}
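/*
 * RIPEMD-256 reuses the four-word RIPEMD-128 step: there is no fifth
 * chaining word added after the rotation and c is not rotated by 10.
 */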
static void rmd256_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
/* Initialize right lane */
aaa = state[4];
bbb = state[5];
ccc = state[6];
ddd = state[7];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* Swap contents of "a" registers */
tmp = aa; aa = aaa; aaa = tmp;
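/*
 * Exchanging one chaining word between the two lanes after each round
 * is what couples them; without these swaps the result would just be
 * two independent RIPEMD-128-style lanes run in parallel.
 */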
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
/* round 2: right lane */
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* Swap contents of "b" registers */
tmp = bb; bb = bbb; bbb = tmp;
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
/* round 3: right lane */
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* Swap contents of "c" registers */
tmp = cc; cc = ccc; ccc = tmp;
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
/* round 4: right lane */
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* Swap contents of "d" registers */
tmp = dd; dd = ddd; ddd = tmp;
/* combine results */
state[0] += aa;
state[1] += bb;
state[2] += cc;
state[3] += dd;
state[4] += aaa;
state[5] += bbb;
state[6] += ccc;
state[7] += ddd;
return;
}
static void rmd256_init(struct crypto_tfm *tfm)
{
struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H5;
rctx->state[5] = RMD_H6;
rctx->state[6] = RMD_H7;
rctx->state[7] = RMD_H8;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd256_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd256_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd256_update(tfm, padding, padlen);
/* Append length */
rmd256_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd256",
.cra_driver_name = "rmd256",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd256_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD256_DIGEST_SIZE,
.dia_init = rmd256_init,
.dia_update = rmd256_update,
.dia_final = rmd256_final } }
};
static int __init rmd256_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd256_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd256_mod_init);
module_exit(rmd256_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
MODULE_ALIAS("rmd256");
/*
* Cryptographic API.
*
* RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd320_ctx {
u64 byte_count;
u32 state[10];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define K5 RMD_K5
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K9
#define KK5 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define F5(x, y, z) (x ^ (y | ~z))
#define ROUND(a, b, c, d, e, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)) + (e); \
(c) = rol32((c), 10); \
}
static void rmd320_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
ee = state[4];
/* Initialize right lane */
aaa = state[5];
bbb = state[6];
ccc = state[7];
ddd = state[8];
eee = state[9];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* Swap contents of "a" registers */
tmp = aa; aa = aaa; aaa = tmp;
/* round 2: left lane */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
/* round 2: right lane */
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* Swap contents of "b" registers */
tmp = bb; bb = bbb; bbb = tmp;
/* round 3: left lane */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
/* round 3: right lane */
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* Swap contents of "c" registers */
tmp = cc; cc = ccc; ccc = tmp;
/* round 4: left lane */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
/* round 4: right lane */
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* Swap contents of "d" registers */
tmp = dd; dd = ddd; ddd = tmp;
/* round 5: left lane */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
/* round 5: right lane */
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* Swap contents of "e" registers */
tmp = ee; ee = eee; eee = tmp;
/* combine results */
state[0] += aa;
state[1] += bb;
state[2] += cc;
state[3] += dd;
state[4] += ee;
state[5] += aaa;
state[6] += bbb;
state[7] += ccc;
state[8] += ddd;
state[9] += eee;
return;
}
static void rmd320_init(struct crypto_tfm *tfm)
{
struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
rctx->state[5] = RMD_H5;
rctx->state[6] = RMD_H6;
rctx->state[7] = RMD_H7;
rctx->state[8] = RMD_H8;
rctx->state[9] = RMD_H9;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
}
static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
return;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd320_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd320_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
}
/* Add padding and return the message digest. */
static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
{
struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
rmd320_update(tfm, padding, padlen);
/* Append length */
rmd320_update(tfm, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 10; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
}
static struct crypto_alg alg = {
.cra_name = "rmd320",
.cra_driver_name = "rmd320",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = RMD320_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rmd320_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = RMD320_DIGEST_SIZE,
.dia_init = rmd320_init,
.dia_update = rmd320_update,
.dia_final = rmd320_final } }
};
static int __init rmd320_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit rmd320_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(rmd320_mod_init);
module_exit(rmd320_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
MODULE_ALIAS("rmd320");
......@@ -13,15 +13,9 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* 2007-11-13 Added GCM tests
* 2007-11-13 Added AEAD support
* 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
* 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
*
*/
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
......@@ -30,7 +24,6 @@
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
......@@ -38,7 +31,7 @@
#include "tcrypt.h"
/*
* Need to kmalloc() memory for testing kmap().
* Need to kmalloc() memory for testing.
*/
#define TVMEMSIZE 16384
#define XBUFSIZE 32768
......@@ -46,7 +39,7 @@
/*
* Indexes into the xbuf to simulate cross-page access.
*/
#define IDX1 37
#define IDX1 32
#define IDX2 32400
#define IDX3 1
#define IDX4 8193
......@@ -83,7 +76,8 @@ static char *check[] = {
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "lzo", "cts", NULL
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
"lzo", "cts", NULL
};
static void hexdump(unsigned char *buf, unsigned int len)
......@@ -110,22 +104,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
unsigned int i, j, k, temp;
struct scatterlist sg[8];
char result[64];
struct crypto_hash *tfm;
struct hash_desc desc;
struct crypto_ahash *tfm;
struct ahash_request *req;
struct tcrypt_result tresult;
int ret;
void *hash_buff;
printk("\ntesting %s\n", algo);
tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
init_completion(&tresult.completion);
tfm = crypto_alloc_ahash(algo, 0, 0);
if (IS_ERR(tfm)) {
printk("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
desc.tfm = tfm;
desc.flags = 0;
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
printk(KERN_ERR "failed to allocate request for %s\n", algo);
goto out_noreq;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &tresult);
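/*
 * Async implementations may return -EINPROGRESS or -EBUSY from
 * crypto_ahash_digest(); the tcrypt_complete callback then records the
 * final error in tresult and signals tresult.completion, which the
 * switch statements below wait on.
 */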
for (i = 0; i < tcount; i++) {
printk("test %u:\n", i + 1);
......@@ -139,8 +141,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
sg_init_one(&sg[0], hash_buff, template[i].psize);
if (template[i].ksize) {
ret = crypto_hash_setkey(tfm, template[i].key,
template[i].ksize);
crypto_ahash_clear_flags(tfm, ~0);
ret = crypto_ahash_setkey(tfm, template[i].key,
template[i].ksize);
if (ret) {
printk("setkey() failed ret=%d\n", ret);
kfree(hash_buff);
......@@ -148,17 +151,30 @@ static void test_hash(char *algo, struct hash_testvec *template,
}
}
ret = crypto_hash_digest(&desc, sg, template[i].psize, result);
if (ret) {
ahash_request_set_crypt(req, sg, result, template[i].psize);
ret = crypto_ahash_digest(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&tresult.completion);
if (!ret && !(ret = tresult.err)) {
INIT_COMPLETION(tresult.completion);
break;
}
/* fall through */
default:
printk("digest () failed ret=%d\n", ret);
kfree(hash_buff);
goto out;
}
hexdump(result, crypto_hash_digestsize(tfm));
hexdump(result, crypto_ahash_digestsize(tfm));
printk("%s\n",
memcmp(result, template[i].digest,
crypto_hash_digestsize(tfm)) ?
crypto_ahash_digestsize(tfm)) ?
"fail" : "pass");
kfree(hash_buff);
}
......@@ -187,8 +203,9 @@ static void test_hash(char *algo, struct hash_testvec *template,
}
if (template[i].ksize) {
ret = crypto_hash_setkey(tfm, template[i].key,
template[i].ksize);
crypto_ahash_clear_flags(tfm, ~0);
ret = crypto_ahash_setkey(tfm, template[i].key,
template[i].ksize);
if (ret) {
printk("setkey() failed ret=%d\n", ret);
......@@ -196,29 +213,44 @@ static void test_hash(char *algo, struct hash_testvec *template,
}
}
ret = crypto_hash_digest(&desc, sg, template[i].psize,
result);
if (ret) {
ahash_request_set_crypt(req, sg, result,
template[i].psize);
ret = crypto_ahash_digest(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&tresult.completion);
if (!ret && !(ret = tresult.err)) {
INIT_COMPLETION(tresult.completion);
break;
}
/* fall through */
default:
printk("digest () failed ret=%d\n", ret);
goto out;
}
hexdump(result, crypto_hash_digestsize(tfm));
hexdump(result, crypto_ahash_digestsize(tfm));
printk("%s\n",
memcmp(result, template[i].digest,
crypto_hash_digestsize(tfm)) ?
crypto_ahash_digestsize(tfm)) ?
"fail" : "pass");
}
}
out:
crypto_free_hash(tfm);
ahash_request_free(req);
out_noreq:
crypto_free_ahash(tfm);
}
static void test_aead(char *algo, int enc, struct aead_testvec *template,
unsigned int tcount)
{
unsigned int ret, i, j, k, temp;
unsigned int ret, i, j, k, n, temp;
char *q;
struct crypto_aead *tfm;
char *key;
......@@ -344,13 +376,12 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
goto next_one;
}
q = kmap(sg_page(&sg[0])) + sg[0].offset;
q = input;
hexdump(q, template[i].rlen);
printk(KERN_INFO "enc/dec: %s\n",
memcmp(q, template[i].result,
template[i].rlen) ? "fail" : "pass");
kunmap(sg_page(&sg[0]));
next_one:
if (!template[i].key)
kfree(key);
......@@ -360,7 +391,6 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
}
printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
memset(xbuf, 0, XBUFSIZE);
memset(axbuf, 0, XBUFSIZE);
for (i = 0, j = 0; i < tcount; i++) {
......@@ -388,6 +418,7 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
goto out;
}
memset(xbuf, 0, XBUFSIZE);
sg_init_table(sg, template[i].np);
for (k = 0, temp = 0; k < template[i].np; k++) {
memcpy(&xbuf[IDX[k]],
......@@ -450,7 +481,7 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
for (k = 0, temp = 0; k < template[i].np; k++) {
printk(KERN_INFO "page %u\n", k);
q = kmap(sg_page(&sg[k])) + sg[k].offset;
q = &axbuf[IDX[k]];
hexdump(q, template[i].tap[k]);
printk(KERN_INFO "%s\n",
memcmp(q, template[i].result + temp,
......@@ -459,8 +490,15 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
0 : authsize)) ?
"fail" : "pass");
for (n = 0; q[template[i].tap[k] + n]; n++)
;
if (n) {
printk("Result buffer corruption %u "
"bytes:\n", n);
hexdump(&q[template[i].tap[k]], n);
}
temp += template[i].tap[k];
kunmap(sg_page(&sg[k]));
}
}
}
......@@ -473,7 +511,7 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template,
static void test_cipher(char *algo, int enc,
struct cipher_testvec *template, unsigned int tcount)
{
unsigned int ret, i, j, k, temp;
unsigned int ret, i, j, k, n, temp;
char *q;
struct crypto_ablkcipher *tfm;
struct ablkcipher_request *req;
......@@ -569,19 +607,17 @@ static void test_cipher(char *algo, int enc,
goto out;
}
q = kmap(sg_page(&sg[0])) + sg[0].offset;
q = data;
hexdump(q, template[i].rlen);
printk("%s\n",
memcmp(q, template[i].result,
template[i].rlen) ? "fail" : "pass");
kunmap(sg_page(&sg[0]));
}
kfree(data);
}
printk("\ntesting %s %s across pages (chunking)\n", algo, e);
memset(xbuf, 0, XBUFSIZE);
j = 0;
for (i = 0; i < tcount; i++) {
......@@ -596,6 +632,7 @@ static void test_cipher(char *algo, int enc,
printk("test %u (%d bit key):\n",
j, template[i].klen * 8);
memset(xbuf, 0, XBUFSIZE);
crypto_ablkcipher_clear_flags(tfm, ~0);
if (template[i].wk)
crypto_ablkcipher_set_flags(
......@@ -649,14 +686,21 @@ static void test_cipher(char *algo, int enc,
temp = 0;
for (k = 0; k < template[i].np; k++) {
printk("page %u\n", k);
q = kmap(sg_page(&sg[k])) + sg[k].offset;
q = &xbuf[IDX[k]];
hexdump(q, template[i].tap[k]);
printk("%s\n",
memcmp(q, template[i].result + temp,
template[i].tap[k]) ? "fail" :
"pass");
for (n = 0; q[template[i].tap[k] + n]; n++)
;
if (n) {
printk("Result buffer corruption %u "
"bytes:\n", n);
hexdump(&q[template[i].tap[k]], n);
}
temp += template[i].tap[k];
kunmap(sg_page(&sg[k]));
}
}
}
......@@ -1172,6 +1216,14 @@ static void do_test(void)
test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
DES3_EDE_DEC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", ENCRYPT,
des3_ede_cbc_enc_tv_template,
DES3_EDE_CBC_ENC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", DECRYPT,
des3_ede_cbc_dec_tv_template,
DES3_EDE_CBC_DEC_TEST_VECTORS);
test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
......@@ -1382,6 +1434,14 @@ static void do_test(void)
DES3_EDE_ENC_TEST_VECTORS);
test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
DES3_EDE_DEC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", ENCRYPT,
des3_ede_cbc_enc_tv_template,
DES3_EDE_CBC_ENC_TEST_VECTORS);
test_cipher("cbc(des3_ede)", DECRYPT,
des3_ede_cbc_dec_tv_template,
DES3_EDE_CBC_DEC_TEST_VECTORS);
break;
case 5:
......@@ -1550,7 +1610,7 @@ static void do_test(void)
case 29:
test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
break;
case 30:
test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
XETA_ENC_TEST_VECTORS);
......@@ -1615,6 +1675,22 @@ static void do_test(void)
CTS_MODE_DEC_TEST_VECTORS);
break;
case 39:
test_hash("rmd128", rmd128_tv_template, RMD128_TEST_VECTORS);
break;
case 40:
test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS);
break;
case 41:
test_hash("rmd256", rmd256_tv_template, RMD256_TEST_VECTORS);
break;
case 42:
test_hash("rmd320", rmd320_tv_template, RMD320_TEST_VECTORS);
break;
case 100:
test_hash("hmac(md5)", hmac_md5_tv_template,
HMAC_MD5_TEST_VECTORS);
......@@ -1650,6 +1726,16 @@ static void do_test(void)
XCBC_AES_TEST_VECTORS);
break;
case 107:
test_hash("hmac(rmd128)", hmac_rmd128_tv_template,
HMAC_RMD128_TEST_VECTORS);
break;
case 108:
test_hash("hmac(rmd160)", hmac_rmd160_tv_template,
HMAC_RMD160_TEST_VECTORS);
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
......@@ -1788,6 +1874,22 @@ static void do_test(void)
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 399:
break;
......
......@@ -13,12 +13,6 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* 2007-11-13 Added GCM tests
* 2007-11-13 Added AEAD support
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
* 2003-09-14 Changes by Kartikey Mahendra Bhatt
*
*/
#ifndef _CRYPTO_TCRYPT_H
#define _CRYPTO_TCRYPT_H
......@@ -168,6 +162,271 @@ static struct hash_testvec md5_tv_template[] = {
.digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55"
"\xac\x49\xda\x2e\x21\x07\xb6\x7a",
}
};
/*
* RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
*/
#define RMD128_TEST_VECTORS 10
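/*
 * Vectors with .np and .tap set are also run through the chunking path
 * in test_hash(): the plaintext is split into .np scatterlist segments
 * of .tap[i] bytes each to exercise cross-page input.
 */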
static struct hash_testvec rmd128_tv_template[] = {
{
.digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
"\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
"\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
"\x84\x63\x6b\x0f\x69\x14\x4c\x77",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
"\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
"\x10\x71\x49\x22\xb3\x71\x83\x4e",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
"\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
"\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
"\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
.np = 2,
.tap = { 28, 28 },
}, {
.plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
"lmnopqrsmnopqrstnopqrstu",
.psize = 112,
.digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
"\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighijhijk",
.psize = 32,
.digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
"\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
}
};
/*
* RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
*/
#define RMD160_TEST_VECTORS 10
static struct hash_testvec rmd160_tv_template[] = {
{
.digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
"\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae"
"\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04"
"\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8"
"\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb"
"\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed"
"\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb"
"\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
"\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
.np = 2,
.tap = { 28, 28 },
}, {
.plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
"jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
"lmnopqrsmnopqrstnopqrstu",
.psize = 112,
.digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91"
"\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighijhijk",
.psize = 32,
.digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d"
"\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f",
}
};
/*
* RIPEMD-256 test vectors
*/
#define RMD256_TEST_VECTORS 8
static struct hash_testvec rmd256_tv_template[] = {
{
.digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
"\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
"\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
"\x80\xae\x01\x68\xe3\xc5\x52\x2d",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
"\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
"\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
"\xcd\x88\x3a\x91\x34\x69\x29\x25",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
"\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
"\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
"\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
"\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
"\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
"\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
"\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
"\x78\x96\x11\x8a\x51\x97\x96\x87"
"\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
"\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
"\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
"\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
"\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
"\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
"\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
"\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
"\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
"\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
.np = 2,
.tap = { 28, 28 },
}
};
/*
* RIPEMD-320 test vectors
*/
#define RMD320_TEST_VECTORS 8
static struct hash_testvec rmd320_tv_template[] = {
{
.digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
"\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
"\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
"\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
}, {
.plaintext = "a",
.psize = 1,
.digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
"\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
"\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
"\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
}, {
.plaintext = "abc",
.psize = 3,
.digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
"\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
"\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
"\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
}, {
.plaintext = "message digest",
.psize = 14,
.digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
"\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
"\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
"\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
}, {
.plaintext = "abcdefghijklmnopqrstuvwxyz",
.psize = 26,
.digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
"\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
"\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
"\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
}, {
.plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
"fghijklmnopqrstuvwxyz0123456789",
.psize = 62,
.digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
"\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
"\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
"\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
}, {
.plaintext = "1234567890123456789012345678901234567890"
"1234567890123456789012345678901234567890",
.psize = 80,
.digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
"\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
"\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
"\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
}, {
.plaintext = "abcdbcdecdefdefgefghfghighij"
"hijkijkljklmklmnlmnomnopnopq",
.psize = 56,
.digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
"\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
"\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
"\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
.np = 2,
.tap = { 28, 28 },
}
};
/*
......@@ -816,6 +1075,168 @@ static struct hash_testvec hmac_md5_tv_template[] =
},
};
/*
* HMAC-RIPEMD128 test vectors from RFC2286
*/
#define HMAC_RMD128_TEST_VECTORS 7
static struct hash_testvec hmac_rmd128_tv_template[] = {
{
.key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
.ksize = 16,
.plaintext = "Hi There",
.psize = 8,
.digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
"\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
}, {
.key = "Jefe",
.ksize = 4,
.plaintext = "what do ya want for nothing?",
.psize = 28,
.digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
"\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
.np = 2,
.tap = { 14, 14 },
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 16,
.plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
.psize = 50,
.digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
"\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
}, {
.key = "\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
"\x11\x12\x13\x14\x15\x16\x17\x18\x19",
.ksize = 25,
.plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
.psize = 50,
.digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
"\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
}, {
.key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
.ksize = 16,
.plaintext = "Test With Truncation",
.psize = 20,
.digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
"\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
.psize = 54,
.digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
"\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
"Block-Size Data",
.psize = 73,
.digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
"\x06\x90\xc2\x37\x63\x5f\x30\xc5",
},
};
/*
* HMAC-RIPEMD160 test vectors from RFC2286
*/
#define HMAC_RMD160_TEST_VECTORS 7
static struct hash_testvec hmac_rmd160_tv_template[] = {
{
.key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
.ksize = 20,
.plaintext = "Hi There",
.psize = 8,
.digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e"
"\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68",
}, {
.key = "Jefe",
.ksize = 4,
.plaintext = "what do ya want for nothing?",
.psize = 28,
.digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
"\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
.np = 2,
.tap = { 14, 14 },
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
.ksize = 20,
.plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
"\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
.psize = 50,
.digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4"
"\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1",
}, {
.key = "\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
"\x11\x12\x13\x14\x15\x16\x17\x18\x19",
.ksize = 25,
.plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
"\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
.psize = 50,
.digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1"
"\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4",
}, {
.key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
.ksize = 20,
.plaintext = "Test With Truncation",
.psize = 20,
.digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a"
"\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
.psize = 54,
.digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd"
"\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b",
}, {
.key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
"\xaa\xaa",
.ksize = 80,
.plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
"Block-Size Data",
.psize = 73,
.digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f"
"\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a",
},
};
/*
* HMAC-SHA1 test vectors from RFC2202
*/
......@@ -1442,6 +1863,8 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
#define DES_CBC_DEC_TEST_VECTORS 4
#define DES3_EDE_ENC_TEST_VECTORS 3
#define DES3_EDE_DEC_TEST_VECTORS 3
#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
static struct cipher_testvec des_enc_tv_template[] = {
{ /* From Applied Cryptography */
......@@ -1680,9 +2103,6 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
},
};
/*
* We really need some more test vectors, especially for DES3 CBC.
*/
static struct cipher_testvec des3_ede_enc_tv_template[] = {
{ /* These are from openssl */
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
......@@ -1745,6 +2165,94 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
},
};
static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
{ /* Generated from openssl */
.key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
"\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
"\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
.klen = 24,
.iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
"\x20\x73\x6f\x54\x20\x6f\x61\x4d"
"\x79\x6e\x53\x20\x63\x65\x65\x72"
"\x73\x74\x54\x20\x6f\x6f\x4d\x20"
"\x6e\x61\x20\x79\x65\x53\x72\x63"
"\x74\x65\x20\x73\x6f\x54\x20\x6f"
"\x61\x4d\x79\x6e\x53\x20\x63\x65"
"\x65\x72\x73\x74\x54\x20\x6f\x6f"
"\x4d\x20\x6e\x61\x20\x79\x65\x53"
"\x72\x63\x74\x65\x20\x73\x6f\x54"
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
.ilen = 128,
.result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
"\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
"\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
"\x76\xd1\xda\x0c\x94\x67\xbb\x04"
"\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
"\x22\x64\x47\xaa\x8f\x75\x13\xbf"
"\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
"\x71\x63\x2e\x89\x7b\x1e\x12\xca"
"\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
"\xd6\xf9\x21\x31\x62\x44\x45\xa6"
"\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
"\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
"\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
.rlen = 128,
},
};
static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
{ /* Generated from openssl */
.key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
"\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
"\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
.klen = 24,
.iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
.input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
"\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
"\x12\x56\x5c\x53\x96\xb6\x00\x7d"
"\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
"\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
"\x76\xd1\xda\x0c\x94\x67\xbb\x04"
"\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
"\x22\x64\x47\xaa\x8f\x75\x13\xbf"
"\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
"\x71\x63\x2e\x89\x7b\x1e\x12\xca"
"\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
"\xd6\xf9\x21\x31\x62\x44\x45\xa6"
"\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
"\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
"\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
.ilen = 128,
.result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
"\x53\x20\x63\x65\x65\x72\x73\x74"
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
"\x20\x79\x65\x53\x72\x63\x74\x65"
"\x20\x73\x6f\x54\x20\x6f\x61\x4d"
"\x79\x6e\x53\x20\x63\x65\x65\x72"
"\x73\x74\x54\x20\x6f\x6f\x4d\x20"
"\x6e\x61\x20\x79\x65\x53\x72\x63"
"\x74\x65\x20\x73\x6f\x54\x20\x6f"
"\x61\x4d\x79\x6e\x53\x20\x63\x65"
"\x65\x72\x73\x74\x54\x20\x6f\x6f"
"\x4d\x20\x6e\x61\x20\x79\x65\x53"
"\x72\x63\x74\x65\x20\x73\x6f\x54"
"\x20\x6f\x61\x4d\x79\x6e\x53\x20"
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
.rlen = 128,
},
};
/*
* Blowfish test vectors.
*/
......
......@@ -174,4 +174,30 @@ config CRYPTO_DEV_HIFN_795X_RNG
Select this option if you want to enable the random number generator
on the HIFN 795x crypto adapters.
config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select HW_RANDOM
depends on FSL_SOC
help
Say 'Y' here to use the Freescale Security Engine (SEC)
to offload cryptographic algorithm computation.
The Freescale SEC is present on PowerQUICC 'E' processors, such
as the MPC8349E and MPC8548E.
To compile this driver as a module, choose M here: the module
will be called talitos.
config CRYPTO_DEV_IXP4XX
tristate "Driver for IXP4xx crypto hardware acceleration"
depends on ARCH_IXP4XX
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
Driver for the IXP4xx NPE crypto engine.
endif # CRYPTO_HW
......@@ -2,3 +2,5 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
......@@ -29,7 +29,6 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/ktime.h>
......@@ -369,7 +368,9 @@ static atomic_t hifn_dev_number;
#define HIFN_D_DST_RSIZE 80*4
#define HIFN_D_RES_RSIZE 24*4
#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5
#define HIFN_D_DST_DALIGN 4
#define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-1
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
......@@ -535,10 +536,10 @@ struct hifn_crypt_command
*/
struct hifn_mac_command
{
volatile u16 masks;
volatile u16 header_skip;
volatile u16 source_count;
volatile u16 reserved;
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_MAC_CMD_ALG_MASK 0x0001
......@@ -564,10 +565,10 @@ struct hifn_mac_command
struct hifn_comp_command
{
volatile u16 masks;
volatile u16 header_skip;
volatile u16 source_count;
volatile u16 reserved;
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_COMP_CMD_SRCLEN_M 0xc000
......@@ -583,10 +584,10 @@ struct hifn_comp_command
struct hifn_base_result
{
volatile u16 flags;
volatile u16 session;
volatile u16 src_cnt; /* 15:0 of source count */
volatile u16 dst_cnt; /* 15:0 of dest count */
volatile __le16 flags;
volatile __le16 session;
volatile __le16 src_cnt; /* 15:0 of source count */
volatile __le16 dst_cnt; /* 15:0 of dest count */
};
#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
......@@ -597,8 +598,8 @@ struct hifn_base_result
struct hifn_comp_result
{
volatile u16 flags;
volatile u16 crc;
volatile __le16 flags;
volatile __le16 crc;
};
#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
......@@ -609,8 +610,8 @@ struct hifn_comp_result
struct hifn_mac_result
{
volatile u16 flags;
volatile u16 reserved;
volatile __le16 flags;
volatile __le16 reserved;
/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
};
......@@ -619,8 +620,8 @@ struct hifn_mac_result
struct hifn_crypt_result
{
volatile u16 flags;
volatile u16 reserved;
volatile __le16 flags;
volatile __le16 reserved;
};
#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
......@@ -686,12 +687,12 @@ static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
{
writel(val, dev->bar[0] + reg);
writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
}
static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
{
writel(val, dev->bar[1] + reg);
writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
}
static void hifn_wait_puc(struct hifn_device *dev)
......@@ -894,7 +895,7 @@ static int hifn_enable_crypto(struct hifn_device *dev)
char *offtbl = NULL;
int i;
for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
if (pci2id[i].pci_vendor == dev->pdev->vendor &&
pci2id[i].pci_prod == dev->pdev->device) {
offtbl = pci2id[i].card_id;
......@@ -1037,14 +1038,14 @@ static void hifn_init_registers(struct hifn_device *dev)
hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
/* write all 4 ring address registers */
hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, cmdr[0])));
hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, srcr[0])));
hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, dstr[0])));
hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
offsetof(struct hifn_dma, resr[0])));
hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
offsetof(struct hifn_dma, cmdr[0]));
hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
offsetof(struct hifn_dma, srcr[0]));
hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
offsetof(struct hifn_dma, dstr[0]));
hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
offsetof(struct hifn_dma, resr[0]));
mdelay(2);
#if 0
......@@ -1166,109 +1167,15 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
return cmd_len;
}
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
idx = dma->srci;
dma->srcr[idx].p = __cpu_to_le32(addr);
dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
if (++idx == HIFN_D_SRC_RSIZE) {
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
idx = 0;
}
dma->srci = idx;
dma->srcu++;
if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
dev->flags |= HIFN_FLAG_SRC_BUSY;
}
return size;
}
static void hifn_setup_res_desc(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
/*
* dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
* HIFN_D_LAST | HIFN_D_NOINVALID);
*/
if (++dma->resi == HIFN_D_RES_RSIZE) {
dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
dma->resi = 0;
}
dma->resu++;
if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
dev->flags |= HIFN_FLAG_RES_BUSY;
}
}
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
if (++idx == HIFN_D_DST_RSIZE) {
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
HIFN_D_LAST | HIFN_D_NOINVALID);
idx = 0;
}
dma->dsti = idx;
dma->dstu++;
if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
dev->flags |= HIFN_FLAG_DST_BUSY;
}
}
static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
struct hifn_context *ctx)
static int hifn_setup_cmd_desc(struct hifn_device *dev,
struct hifn_context *ctx, void *priv, unsigned int nbytes)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int cmd_len, sa_idx;
u8 *buf, *buf_pos;
u16 mask;
dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
sa_idx = dma->resi;
hifn_setup_src_desc(dev, spage, soff, nbytes);
sa_idx = dma->cmdi;
buf_pos = buf = dma->command_bufs[dma->cmdi];
mask = 0;
......@@ -1370,16 +1277,113 @@ static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
dev->flags |= HIFN_FLAG_CMD_BUSY;
}
hifn_setup_dst_desc(dev, dpage, doff, nbytes);
hifn_setup_res_desc(dev);
return 0;
err_out:
return -EINVAL;
}
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
idx = dma->srci;
dma->srcr[idx].p = __cpu_to_le32(addr);
dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
if (++idx == HIFN_D_SRC_RSIZE) {
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
idx = 0;
}
dma->srci = idx;
dma->srcu++;
if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
dev->flags |= HIFN_FLAG_SRC_BUSY;
}
return size;
}
static void hifn_setup_res_desc(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
/*
* dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
* HIFN_D_LAST);
*/
if (++dma->resi == HIFN_D_RES_RSIZE) {
dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
dma->resi = 0;
}
dma->resu++;
if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
dev->flags |= HIFN_FLAG_RES_BUSY;
}
}
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
if (++idx == HIFN_D_DST_RSIZE) {
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
HIFN_D_LAST);
idx = 0;
}
dma->dsti = idx;
dma->dstu++;
if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
dev->flags |= HIFN_FLAG_DST_BUSY;
}
}
static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
struct hifn_context *ctx)
{
dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
hifn_setup_src_desc(dev, spage, soff, nbytes);
hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
hifn_setup_dst_desc(dev, dpage, doff, nbytes);
hifn_setup_res_desc(dev);
return 0;
}
static int ablkcipher_walk_init(struct ablkcipher_walk *w,
int num, gfp_t gfp_flags)
{
......@@ -1431,7 +1435,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
return -EINVAL;
while (size) {
copy = min(drest, src->length);
copy = min(drest, min(size, src->length));
saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
memcpy(daddr, saddr + src->offset, copy);
......@@ -1458,10 +1462,6 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
static int ablkcipher_walk(struct ablkcipher_request *req,
struct ablkcipher_walk *w)
{
unsigned blocksize =
crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
unsigned alignmask =
crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
struct scatterlist *src, *dst, *t;
void *daddr;
unsigned int nbytes = req->nbytes, offset, copy, diff;
......@@ -1477,16 +1477,14 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
dst = &req->dst[idx];
dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
"blocksize: %u, nbytes: %u.\n",
"nbytes: %u.\n",
__func__, src->length, dst->length, src->offset,
dst->offset, offset, blocksize, nbytes);
if (src->length & (blocksize - 1) ||
src->offset & (alignmask - 1) ||
dst->length & (blocksize - 1) ||
dst->offset & (alignmask - 1) ||
offset) {
unsigned slen = src->length - offset;
dst->offset, offset, nbytes);
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
!IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
offset) {
unsigned slen = min(src->length - offset, nbytes);
unsigned dlen = PAGE_SIZE;
t = &w->cache[idx];
......@@ -1498,8 +1496,8 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
idx += err;
copy = slen & ~(blocksize - 1);
diff = slen & (blocksize - 1);
copy = slen & ~(HIFN_D_DST_DALIGN - 1);
diff = slen & (HIFN_D_DST_DALIGN - 1);
if (dlen < nbytes) {
/*
......@@ -1507,7 +1505,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
* to put there additional blocksized chunk,
* so we mark that page as containing only
* blocksize aligned chunks:
* t->length = (slen & ~(blocksize - 1));
* t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
* and increase number of bytes to be processed
* in next chunk:
* nbytes += diff;
......@@ -1544,7 +1542,7 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
kunmap_atomic(daddr, KM_SOFTIRQ0);
} else {
nbytes -= src->length;
nbytes -= min(src->length, nbytes);
idx++;
}
......@@ -1563,14 +1561,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
struct page *spage, *dpage;
unsigned long soff, doff, flags;
unsigned long soff, doff, dlen, flags;
unsigned int nbytes = req->nbytes, idx = 0, len;
int err = -EINVAL, sg_num;
struct scatterlist *src, *dst, *t;
unsigned blocksize =
crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
unsigned alignmask =
crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
goto err_out_exit;
......@@ -1578,17 +1572,14 @@ static int hifn_setup_session(struct ablkcipher_request *req)
ctx->walk.flags = 0;
while (nbytes) {
src = &req->src[idx];
dst = &req->dst[idx];
dlen = min(dst->length, nbytes);
if (src->length & (blocksize - 1) ||
src->offset & (alignmask - 1) ||
dst->length & (blocksize - 1) ||
dst->offset & (alignmask - 1)) {
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
!IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
}
nbytes -= src->length;
nbytes -= dlen;
idx++;
}
......@@ -1602,7 +1593,10 @@ static int hifn_setup_session(struct ablkcipher_request *req)
idx = 0;
sg_num = ablkcipher_walk(req, &ctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
}
atomic_set(&ctx->sg_num, sg_num);
spin_lock_irqsave(&dev->lock, flags);
......@@ -1640,7 +1634,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
if (err)
goto err_out;
nbytes -= len;
nbytes -= min(len, nbytes);
}
dev->active = HIFN_DEFAULT_ACTIVE_NUM;
......@@ -1651,7 +1645,7 @@ static int hifn_setup_session(struct ablkcipher_request *req)
err_out:
spin_unlock_irqrestore(&dev->lock, flags);
err_out_exit:
if (err && printk_ratelimit())
if (err)
dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
"type: %u, err: %d.\n",
dev->name, ctx->iv, ctx->ivsize,
......@@ -1745,8 +1739,7 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
return -EINVAL;
while (size) {
copy = min(dst->length, srest);
copy = min(srest, min(dst->length, size));
daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
memcpy(daddr + dst->offset + offset, saddr, copy);
......@@ -1803,7 +1796,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
sg_page(dst), dst->length, nbytes);
if (!t->length) {
nbytes -= dst->length;
nbytes -= min(dst->length, nbytes);
idx++;
continue;
}
......@@ -2202,9 +2195,9 @@ static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
return err;
if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
err = hifn_process_queue(dev);
hifn_process_queue(dev);
return err;
return -EINPROGRESS;
}
/*
......@@ -2364,7 +2357,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
* 3DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
......@@ -2374,7 +2367,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
......@@ -2384,8 +2377,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
.ablkcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
......@@ -2394,7 +2388,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
......@@ -2408,7 +2402,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
* DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
......@@ -2418,7 +2412,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
......@@ -2428,8 +2422,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
.ablkcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
......@@ -2438,7 +2433,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
......@@ -2452,7 +2447,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
* AES ECB, CBC, CFB and OFB modes.
*/
{
.name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
......@@ -2462,8 +2457,9 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
.ablkcipher = {
.ivsize = HIFN_AES_IV_LENGTH,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
......@@ -2472,7 +2468,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
......@@ -2482,7 +2478,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
},
},
{
.name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
......@@ -2514,15 +2510,14 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
return -ENOMEM;
snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
t->drv_name, dev->name);
alg->alg.cra_priority = 300;
alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
alg->alg.cra_blocksize = t->bsize;
alg->alg.cra_ctxsize = sizeof(struct hifn_context);
alg->alg.cra_alignmask = 15;
if (t->bsize == 8)
alg->alg.cra_alignmask = 3;
alg->alg.cra_alignmask = 0;
alg->alg.cra_type = &crypto_ablkcipher_type;
alg->alg.cra_module = THIS_MODULE;
alg->alg.cra_u.ablkcipher = t->ablkcipher;
......
/*
* Intel IXP4xx NPE-C crypto driver
*
* Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <asm/arch/npe.h>
#include <asm/arch/qmgr.h>
#define MAX_KEYLEN 32
/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16
#define NPE_OP_HASH_VERIFY 0x01
#define NPE_OP_CCM_ENABLE 0x04
#define NPE_OP_CRYPT_ENABLE 0x08
#define NPE_OP_HASH_ENABLE 0x10
#define NPE_OP_NOT_IN_PLACE 0x20
#define NPE_OP_HMAC_DISABLE 0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80
#define NPE_OP_CCM_GEN_MIC 0xcc
#define NPE_OP_HASH_GEN_ICV 0x50
#define NPE_OP_ENC_GEN_KEY 0xc9
#define MOD_ECB 0x0000
#define MOD_CTR 0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000
#define KEYLEN_128 4
#define KEYLEN_192 6
#define KEYLEN_256 8
#define CIPH_DECR 0x0000
#define CIPH_ENCR 0x0400
#define MOD_DES 0x0000
#define MOD_TDEA2 0x0100
#define MOD_3DES 0x0200
#define MOD_AES 0x0800
#define MOD_AES128 (0x0800 | KEYLEN_128)
#define MOD_AES192 (0x0900 | KEYLEN_192)
#define MOD_AES256 (0x0a00 | KEYLEN_256)
#define MAX_IVLEN 16
#define NPE_ID 2 /* NPE C */
#define NPE_QLEN 16
/* Extra descriptors reserved for key/ICV registration
 * when the first NPE_QLEN crypt_ctl entries are busy */
#define NPE_QLEN_TOTAL 64
#define SEND_QID 29
#define RECV_QID 30
#define CTL_FLAG_UNUSED 0x0000
#define CTL_FLAG_USED 0x1000
#define CTL_FLAG_PERFORM_ABLK 0x0001
#define CTL_FLAG_GEN_ICV 0x0002
#define CTL_FLAG_GEN_REVAES 0x0004
#define CTL_FLAG_PERFORM_AEAD 0x0008
#define CTL_FLAG_MASK 0x000f
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE 16
struct buffer_desc {
u32 phys_next;
u16 buf_len;
u16 pkt_len;
u32 phys_addr;
u32 __reserved[4];
struct buffer_desc *next;
};
struct crypt_ctl {
u8 mode; /* NPE_OP_* operation mode */
u8 init_len;
u16 reserved;
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
u32 icv_rev_aes; /* icv or rev aes */
u32 src_buf;
u32 dst_buf;
u16 auth_offs; /* Authentication start offset */
u16 auth_len; /* Authentication data length */
u16 crypt_offs; /* Cryption start offset */
u16 crypt_len; /* Cryption data length */
u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
u32 crypto_ctx; /* NPE Crypto Param structure address */
/* Used by Host: 4*4 bytes*/
unsigned ctl_flags;
union {
struct ablkcipher_request *ablk_req;
struct aead_request *aead_req;
struct crypto_tfm *tfm;
} data;
struct buffer_desc *regist_buf;
u8 *regist_ptr;
};
struct ablk_ctx {
struct buffer_desc *src;
struct buffer_desc *dst;
unsigned src_nents;
unsigned dst_nents;
};
struct aead_ctx {
struct buffer_desc *buffer;
unsigned short assoc_nents;
unsigned short src_nents;
struct scatterlist ivlist;
/* used when the hmac is not on one sg entry */
u8 *hmac_virt;
int encrypt;
};
struct ix_hash_algo {
u32 cfgword;
unsigned char *icv;
};
struct ix_sa_dir {
unsigned char *npe_ctx;
dma_addr_t npe_ctx_phys;
int npe_ctx_idx;
u8 npe_mode;
};
struct ixp_ctx {
struct ix_sa_dir encrypt;
struct ix_sa_dir decrypt;
int authkey_len;
u8 authkey[MAX_KEYLEN];
int enckey_len;
u8 enckey[MAX_KEYLEN];
u8 salt[MAX_IVLEN];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
unsigned salted;
atomic_t configuring;
struct completion completion;
};
struct ixp_alg {
struct crypto_alg crypto;
const struct ix_hash_algo *hash;
u32 cfg_enc;
u32 cfg_dec;
int registered;
};
static const struct ix_hash_algo hash_alg_md5 = {
.cfgword = 0xAA010004,
.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
.cfgword = 0x00000005,
.icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
"\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;
static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;
static int support_aes = 1;
static void dev_release(struct device *dev)
{
return;
}
#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
.name = DRIVER_NAME,
.id = 0,
.num_resources = 0,
.dev = {
.coherent_dma_mask = DMA_32BIT_MASK,
.release = dev_release,
}
};
static struct device *dev = &pseudo_dev.dev;
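/* The crypt_ctl ring lives in a single coherent DMA allocation, so the
 * CPU and bus views of a descriptor can be converted with plain pointer
 * arithmetic. */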
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}
static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
}
static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
}
static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}
static int setup_crypt_desc(void)
{
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
crypt_virt = dma_alloc_coherent(dev,
NPE_QLEN * sizeof(struct crypt_ctl),
&crypt_phys, GFP_KERNEL);
if (!crypt_virt)
return -ENOMEM;
memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
static spinlock_t desc_lock;
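/* Hand out crypt_ctl descriptors from the first NPE_QLEN slots in
 * round-robin order; a slot becomes free again once the completion path
 * resets its ctl_flags to CTL_FLAG_UNUSED. */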
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
static int idx = 0;
unsigned long flags;
spin_lock_irqsave(&desc_lock, flags);
if (unlikely(!crypt_virt))
setup_crypt_desc();
if (unlikely(!crypt_virt)) {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN)
idx = 0;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&desc_lock, flags);
return crypt_virt +i;
} else {
spin_unlock_irqrestore(&desc_lock, flags);
return NULL;
}
}
static spinlock_t emerg_lock;
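/* Key setup and ICV registration must not fail just because the normal
 * descriptor slots are busy, so fall back to the reserved slots
 * [NPE_QLEN, NPE_QLEN_TOTAL) when get_crypt_desc() returns NULL. */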
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
static int idx = NPE_QLEN;
struct crypt_ctl *desc;
unsigned long flags;
desc = get_crypt_desc();
if (desc)
return desc;
if (unlikely(!crypt_virt))
return NULL;
spin_lock_irqsave(&emerg_lock, flags);
i = idx;
if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
if (++idx >= NPE_QLEN_TOTAL)
idx = NPE_QLEN;
crypt_virt[i].ctl_flags = CTL_FLAG_USED;
spin_unlock_irqrestore(&emerg_lock, flags);
return crypt_virt +i;
} else {
spin_unlock_irqrestore(&emerg_lock, flags);
return NULL;
}
}
static void free_buf_chain(struct buffer_desc *buf, u32 phys)
{
while (buf) {
struct buffer_desc *buf1;
u32 phys1;
buf1 = buf->next;
phys1 = buf->phys_next;
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;
}
}
static struct tasklet_struct crypto_done_tasklet;
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int authsize = crypto_aead_authsize(tfm);
int decryptlen = req->cryptlen - authsize;
if (req_ctx->encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, decryptlen, authsize, 1);
}
dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
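/* Completion handling for one descriptor returned by the NPE: bit 0 of
 * the returned address carries the error status, the remaining bits point
 * back at the crypt_ctl entry that was submitted. */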
static void one_packet(dma_addr_t phys)
{
struct crypt_ctl *crypt;
struct ixp_ctx *ctx;
int failed;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
failed = phys & 0x1 ? -EBADMSG : 0;
phys &= ~0x3;
crypt = crypt_phys2virt(phys);
switch (crypt->ctl_flags & CTL_FLAG_MASK) {
case CTL_FLAG_PERFORM_AEAD: {
struct aead_request *req = crypt->data.aead_req;
struct aead_ctx *req_ctx = aead_request_ctx(req);
dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
DMA_TO_DEVICE);
dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
dma_unmap_sg(dev, req->src, req_ctx->src_nents,
DMA_BIDIRECTIONAL);
free_buf_chain(req_ctx->buffer, crypt->src_buf);
if (req_ctx->hmac_virt) {
finish_scattered_hmac(crypt);
}
req->base.complete(&req->base, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
struct ablkcipher_request *req = crypt->data.ablk_req;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
int nents;
if (req_ctx->dst) {
nents = req_ctx->dst_nents;
dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
free_buf_chain(req_ctx->dst, crypt->dst_buf);
src_direction = DMA_TO_DEVICE;
}
nents = req_ctx->src_nents;
dma_unmap_sg(dev, req->src, nents, src_direction);
free_buf_chain(req_ctx->src, crypt->src_buf);
req->base.complete(&req->base, failed);
break;
}
case CTL_FLAG_GEN_ICV:
ctx = crypto_tfm_ctx(crypt->data.tfm);
dma_pool_free(ctx_pool, crypt->regist_ptr,
crypt->regist_buf->phys_addr);
dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
case CTL_FLAG_GEN_REVAES:
ctx = crypto_tfm_ctx(crypt->data.tfm);
*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
if (atomic_dec_and_test(&ctx->configuring))
complete(&ctx->completion);
break;
default:
BUG();
}
crypt->ctl_flags = CTL_FLAG_UNUSED;
}
static void irqhandler(void *_unused)
{
tasklet_schedule(&crypto_done_tasklet);
}
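/* Drain up to four completed requests per tasklet run; if the receive
 * queue may still hold more, reschedule instead of looping in softirq
 * context forever. */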
static void crypto_done_action(unsigned long arg)
{
int i;
for(i=0; i<4; i++) {
dma_addr_t phys = qmgr_get_entry(RECV_QID);
if (!phys)
return;
one_packet(phys);
}
tasklet_schedule(&crypto_done_tasklet);
}
static int init_ixp_crypto(void)
{
int ret = -ENODEV;
if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
return ret;
}
npe_c = npe_request(NPE_ID);
if (!npe_c)
return ret;
if (!npe_running(npe_c)) {
npe_load_firmware(npe_c, npe_name(npe_c), dev);
}
/* The buffer_pool is sometimes also used to hold a copy of the hmac,
 * so make sure its entries are large enough.
 */
BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
buffer_pool = dma_pool_create("buffer", dev,
sizeof(struct buffer_desc), 32, 0);
ret = -ENOMEM;
if (!buffer_pool) {
goto err;
}
ctx_pool = dma_pool_create("context", dev,
NPE_CTX_LEN, 16, 0);
if (!ctx_pool) {
goto err;
}
ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
if (ret)
goto err;
ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
if (ret) {
qmgr_release_queue(SEND_QID);
goto err;
}
qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
qmgr_enable_irq(RECV_QID);
return 0;
err:
if (ctx_pool)
dma_pool_destroy(ctx_pool);
if (buffer_pool)
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
return ret;
}
static void release_ixp_crypto(void)
{
qmgr_disable_irq(RECV_QID);
tasklet_kill(&crypto_done_tasklet);
qmgr_release_queue(SEND_QID);
qmgr_release_queue(RECV_QID);
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
npe_release(npe_c);
if (crypt_virt) {
dma_free_coherent(dev,
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
crypt_virt, crypt_phys);
}
return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
{
memset(dir->npe_ctx, 0, NPE_CTX_LEN);
dir->npe_ctx_idx = 0;
dir->npe_mode = 0;
}
static int init_sa_dir(struct ix_sa_dir *dir)
{
dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
if (!dir->npe_ctx) {
return -ENOMEM;
}
reset_sa_dir(dir);
return 0;
}
static void free_sa_dir(struct ix_sa_dir *dir)
{
memset(dir->npe_ctx, 0, NPE_CTX_LEN);
dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}
static int init_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
atomic_set(&ctx->configuring, 0);
ret = init_sa_dir(&ctx->encrypt);
if (ret)
return ret;
ret = init_sa_dir(&ctx->decrypt);
if (ret) {
free_sa_dir(&ctx->encrypt);
}
return ret;
}
static int init_tfm_ablk(struct crypto_tfm *tfm)
{
tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
return init_tfm(tfm);
}
static int init_tfm_aead(struct crypto_tfm *tfm)
{
tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
return init_tfm(tfm);
}
static void exit_tfm(struct crypto_tfm *tfm)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
free_sa_dir(&ctx->encrypt);
free_sa_dir(&ctx->decrypt);
}
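/* Precompute one HMAC chaining variable: zero-pad the key to the block
 * length, XOR it with the ipad or opad constant and let the NPE hash the
 * resulting block into the context location given by 'target'. */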
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypt_ctl *crypt;
struct buffer_desc *buf;
int i;
u8 *pad;
u32 pad_phys, buf_phys;
BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
if (!pad)
return -ENOMEM;
buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
if (!buf) {
dma_pool_free(ctx_pool, pad, pad_phys);
return -ENOMEM;
}
crypt = get_crypt_desc_emerg();
if (!crypt) {
dma_pool_free(ctx_pool, pad, pad_phys);
dma_pool_free(buffer_pool, buf, buf_phys);
return -EAGAIN;
}
memcpy(pad, key, key_len);
memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
pad[i] ^= xpad;
}
crypt->data.tfm = tfm;
crypt->regist_ptr = pad;
crypt->regist_buf = buf;
crypt->auth_offs = 0;
crypt->auth_len = HMAC_PAD_BLOCKLEN;
crypt->crypto_ctx = ctx_addr;
crypt->src_buf = buf_phys;
crypt->icv_rev_aes = target;
crypt->mode = NPE_OP_HASH_GEN_ICV;
crypt->init_len = init_len;
crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
buf->next = 0;
buf->buf_len = HMAC_PAD_BLOCKLEN;
buf->pkt_len = 0;
buf->phys_addr = pad_phys;
atomic_inc(&ctx->configuring);
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return 0;
}
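/* Program the hash part of the per-direction NPE context: configuration
 * word and initial chaining values first, then the precomputed HMAC
 * ipad/opad variables derived from the key. */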
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
const u8 *key, int key_len, unsigned digest_len)
{
u32 itarget, otarget, npe_ctx_addr;
unsigned char *cinfo;
int init_len, ret = 0;
u32 cfgword;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
const struct ix_hash_algo *algo;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx + dir->npe_ctx_idx;
algo = ix_hash(tfm);
/* write cfg word to cryptinfo */
cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
*(u32*)cinfo = cpu_to_be32(cfgword);
cinfo += sizeof(cfgword);
/* write ICV to cryptinfo */
memcpy(cinfo, algo->icv, digest_len);
cinfo += digest_len;
itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+ sizeof(algo->cfgword);
otarget = itarget + digest_len;
init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
dir->npe_ctx_idx += init_len;
dir->npe_mode |= NPE_OP_HASH_ENABLE;
if (!encrypt)
dir->npe_mode |= NPE_OP_HASH_VERIFY;
ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
init_len, npe_ctx_addr, key, key_len);
if (ret)
return ret;
return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
init_len, npe_ctx_addr, key, key_len);
}
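/* AES decryption on the NPE requires the reverse (decryption) key
 * schedule; ask the engine to derive it from the encryption key. */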
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
struct crypt_ctl *crypt;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
struct ix_sa_dir *dir = &ctx->decrypt;
crypt = get_crypt_desc_emerg();
if (!crypt) {
return -EAGAIN;
}
*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
crypt->data.tfm = tfm;
crypt->crypt_offs = 0;
crypt->crypt_len = AES_BLOCK128;
crypt->src_buf = 0;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
crypt->mode = NPE_OP_ENC_GEN_KEY;
crypt->init_len = dir->npe_ctx_idx;
crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
atomic_inc(&ctx->configuring);
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return 0;
}
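/* Write the cipher configuration word and the key into the per-direction
 * NPE context; DES keys are zero-padded to the 3DES key size the NPE
 * expects. */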
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
const u8 *key, int key_len)
{
u8 *cinfo;
u32 cipher_cfg;
u32 keylen_cfg = 0;
struct ix_sa_dir *dir;
struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
cinfo = dir->npe_ctx;
if (encrypt) {
cipher_cfg = cipher_cfg_enc(tfm);
dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
} else {
cipher_cfg = cipher_cfg_dec(tfm);
}
if (cipher_cfg & MOD_AES) {
switch (key_len) {
case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else if (cipher_cfg & MOD_3DES) {
const u32 *K = (const u32 *)key;
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))))
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
return -EINVAL;
}
} else {
u32 tmp[DES_EXPKEY_WORDS];
if (des_ekey(tmp, key) == 0) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
}
}
/* write cfg word to cryptinfo */
*(u32*)cinfo = cpu_to_be32(cipher_cfg);
cinfo += sizeof(cipher_cfg);
/* write cipher key to cryptinfo */
memcpy(cinfo, key, key_len);
/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
key_len = DES3_EDE_KEY_SIZE;
}
dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
if ((cipher_cfg & MOD_AES) && !encrypt) {
return gen_rev_aes_key(tfm);
}
return 0;
}
static int count_sg(struct scatterlist *sg, int nbytes)
{
int i;
for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
nbytes -= sg->length;
return i;
}
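/* Build the NPE buffer descriptor chain for a scatterlist: physically
 * contiguous chunks are merged into one descriptor, everything else gets
 * a fresh buffer_desc from the DMA pool. Returns the last descriptor in
 * the chain, or NULL on allocation failure. */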
static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
{
int nents = 0;
while (nbytes > 0) {
struct buffer_desc *next_buf;
u32 next_buf_phys;
unsigned len = min(nbytes, sg_dma_len(sg));
nents++;
nbytes -= len;
if (!buf->phys_addr) {
buf->phys_addr = sg_dma_address(sg);
buf->buf_len = len;
buf->next = NULL;
buf->phys_next = 0;
goto next;
}
/* If this chunk is physically contiguous with the previous one,
* extend the previous buffer descriptor instead of allocating a new one.
*/
if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
buf->buf_len += len;
goto next;
}
next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
if (!next_buf)
return NULL;
buf->next = next_buf;
buf->phys_next = next_buf_phys;
buf = next_buf;
buf->next = NULL;
buf->phys_next = 0;
buf->phys_addr = sg_dma_address(sg);
buf->buf_len = len;
next:
if (nbytes > 0) {
sg = sg_next(sg);
}
}
return buf;
}
static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
int ret;
init_completion(&ctx->completion);
atomic_inc(&ctx->configuring);
reset_sa_dir(&ctx->encrypt);
reset_sa_dir(&ctx->decrypt);
ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
ret = setup_cipher(&tfm->base, 0, key, key_len);
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, key, key_len);
if (ret)
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
ret = -EINVAL;
} else {
*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
}
}
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
return ret;
}
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
/* the nonce is stored at the end of the key */
if (key_len < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
CTR_RFC3686_NONCE_SIZE);
key_len -= CTR_RFC3686_NONCE_SIZE;
return ablk_setkey(tfm, key, key_len);
}
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
int ret = -ENOMEM;
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int nbytes = req->nbytes, nents;
enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
if (qmgr_stat_full(SEND_QID))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
crypt = get_crypt_desc();
if (!crypt)
return ret;
crypt->data.ablk_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->mode = dir->npe_mode;
crypt->init_len = dir->npe_ctx_idx;
crypt->crypt_offs = 0;
crypt->crypt_len = nbytes;
BUG_ON(ivsize && !req->info);
memcpy(crypt->iv, req->info, ivsize);
if (req->src != req->dst) {
crypt->mode |= NPE_OP_NOT_IN_PLACE;
nents = count_sg(req->dst, nbytes);
/* This was never tested by Intel
* for more than one dst buffer, I think. */
BUG_ON(nents != 1);
req_ctx->dst_nents = nents;
dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
if (!req_ctx->dst)
goto unmap_sg_dest;
req_ctx->dst->phys_addr = 0;
if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
goto free_buf_dest;
src_direction = DMA_TO_DEVICE;
} else {
req_ctx->dst = NULL;
req_ctx->dst_nents = 0;
}
nents = count_sg(req->src, nbytes);
req_ctx->src_nents = nents;
dma_map_sg(dev, req->src, nents, src_direction);
req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
if (!req_ctx->src)
goto unmap_sg_src;
req_ctx->src->phys_addr = 0;
if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
goto free_buf_src;
crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
free_buf_src:
free_buf_chain(req_ctx->src, crypt->src_buf);
unmap_sg_src:
dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
free_buf_dest:
if (req->src != req->dst) {
free_buf_chain(req_ctx->dst, crypt->dst_buf);
unmap_sg_dest:
dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
DMA_FROM_DEVICE);
}
crypt->ctl_flags = CTL_FLAG_UNUSED;
return ret;
}
static int ablk_encrypt(struct ablkcipher_request *req)
{
return ablk_perform(req, 1);
}
static int ablk_decrypt(struct ablkcipher_request *req)
{
return ablk_perform(req, 0);
}
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u8 iv[CTR_RFC3686_BLOCK_SIZE];
u8 *info = req->info;
int ret;
/* set up counter block */
memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
req->info = iv;
ret = ablk_perform(req, 1);
req->info = info;
return ret;
}
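/* Non-zero when the byte range [start, start + nbytes) does not fit into
 * a single scatterlist entry, i.e. the authentication tag is scattered
 * and has to be bounced through a contiguous buffer. */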
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
unsigned int nbytes)
{
int offset = 0;
if (!nbytes)
return 0;
for (;;) {
if (start < offset + sg->length)
break;
offset += sg->length;
sg = sg_next(sg);
}
return (start + nbytes > offset + sg->length);
}
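/* Common path for AEAD encrypt/decrypt/givencrypt: the hash covers the
 * associated data, the IV and the payload, while the cipher covers
 * [cryptoffset, cryptoffset + eff_cryptlen). */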
static int aead_perform(struct aead_request *req, int encrypt,
int cryptoffset, int eff_cryptlen, u8 *iv)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned ivsize = crypto_aead_ivsize(tfm);
unsigned authsize = crypto_aead_authsize(tfm);
int ret = -ENOMEM;
struct ix_sa_dir *dir;
struct crypt_ctl *crypt;
unsigned int cryptlen, nents;
struct buffer_desc *buf;
struct aead_ctx *req_ctx = aead_request_ctx(req);
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
if (qmgr_stat_full(SEND_QID))
return -EAGAIN;
if (atomic_read(&ctx->configuring))
return -EAGAIN;
if (encrypt) {
dir = &ctx->encrypt;
cryptlen = req->cryptlen;
} else {
dir = &ctx->decrypt;
/* req->cryptlen includes the authsize when decrypting */
cryptlen = req->cryptlen -authsize;
eff_cryptlen -= authsize;
}
crypt = get_crypt_desc();
if (!crypt)
return ret;
crypt->data.aead_req = req;
crypt->crypto_ctx = dir->npe_ctx_phys;
crypt->mode = dir->npe_mode;
crypt->init_len = dir->npe_ctx_idx;
crypt->crypt_offs = cryptoffset;
crypt->crypt_len = eff_cryptlen;
crypt->auth_offs = 0;
crypt->auth_len = req->assoclen + ivsize + cryptlen;
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
if (req->src != req->dst) {
BUG(); /* -ENOTSUP because of my laziness */
}
req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
if (!req_ctx->buffer)
goto out;
req_ctx->buffer->phys_addr = 0;
/* ASSOC data */
nents = count_sg(req->assoc, req->assoclen);
req_ctx->assoc_nents = nents;
dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
if (!buf)
goto unmap_sg_assoc;
/* IV */
sg_init_table(&req_ctx->ivlist, 1);
sg_set_buf(&req_ctx->ivlist, iv, ivsize);
dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
if (!buf)
goto unmap_sg_iv;
if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
/* The hmac bytes are scattered across sg entries,
* so we need to copy them into a safe contiguous buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
goto unmap_sg_iv;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
}
req_ctx->encrypt = encrypt;
} else {
req_ctx->hmac_virt = NULL;
}
/* Crypt */
nents = count_sg(req->src, cryptlen + authsize);
req_ctx->src_nents = nents;
dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
if (!buf)
goto unmap_sg_src;
if (!req_ctx->hmac_virt) {
crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
}
crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
unmap_sg_src:
dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
if (req_ctx->hmac_virt) {
dma_pool_free(buffer_pool, req_ctx->hmac_virt,
crypt->icv_rev_aes);
}
unmap_sg_iv:
dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
unmap_sg_assoc:
dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
free_buf_chain(req_ctx->buffer, crypt->src_buf);
out:
crypt->ctl_flags = CTL_FLAG_UNUSED;
return ret;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
int ret;
if (!ctx->enckey_len && !ctx->authkey_len)
return 0;
init_completion(&ctx->completion);
atomic_inc(&ctx->configuring);
reset_sa_dir(&ctx->encrypt);
reset_sa_dir(&ctx->decrypt);
ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
if (ret)
goto out;
ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
if (ret)
goto out;
ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
ctx->authkey_len, digest_len);
if (ret)
goto out;
if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
ret = -EINVAL;
goto out;
} else {
*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
}
}
out:
if (!atomic_dec_and_test(&ctx->configuring))
wait_for_completion(&ctx->completion);
return ret;
}
static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
return -EINVAL;
return aead_setup(tfm, authsize);
}
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
ctx->enckey_len = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < ctx->enckey_len)
goto badkey;
ctx->authkey_len = keylen - ctx->enckey_len;
memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
memcpy(ctx->authkey, key, ctx->authkey_len);
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
ctx->enckey_len = 0;
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
static int aead_encrypt(struct aead_request *req)
{
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
return aead_perform(req, 1, req->assoclen + ivsize,
req->cryptlen, req->iv);
}
static int aead_decrypt(struct aead_request *req)
{
unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
return aead_perform(req, 0, req->assoclen + ivsize,
req->cryptlen, req->iv);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
unsigned len, ivsize = crypto_aead_ivsize(tfm);
__be64 seq;
/* copied from eseqiv.c */
if (!ctx->salted) {
get_random_bytes(ctx->salt, ivsize);
ctx->salted = 1;
}
memcpy(req->areq.iv, ctx->salt, ivsize);
len = ivsize;
if (ivsize > sizeof(u64)) {
memset(req->giv, 0, ivsize - sizeof(u64));
len = sizeof(u64);
}
seq = cpu_to_be64(req->seq);
memcpy(req->giv + ivsize - len, &seq, len);
return aead_perform(&req->areq, 1, req->areq.assoclen,
req->areq.cryptlen + ivsize, req->giv);
}
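/*
 * Algorithm templates: each entry pairs a crypto_alg skeleton with the
 * hardware configuration words for the encrypt and decrypt directions.
 * Entries without a .hash member are registered as async block ciphers,
 * entries with one as authenc() AEAD transforms (see ixp_module_init()).
 */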
static struct ixp_alg ixp4xx_algos[] = {
{
.crypto = {
.cra_name = "cbc(des)",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "ecb(des)",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
}
}
},
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "ecb(des3_ede)",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
}
}
},
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
.crypto = {
.cra_name = "cbc(aes)",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
.cra_name = "ecb(aes)",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
}
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
.crypto = {
.cra_name = "ctr(aes)",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.geniv = "eseqiv",
}
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
.cra_name = "rfc3686(ctr(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.geniv = "eseqiv",
.setkey = ablk_rfc3686_setkey,
.encrypt = ablk_rfc3686_crypt,
.decrypt = ablk_rfc3686_crypt }
}
},
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_blocksize = DES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
.crypto = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_u = { .aead = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
}
}
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i, err;
if (platform_device_register(&pseudo_dev))
return -ENODEV;
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
err = init_ixp_crypto();
if (err) {
platform_device_unregister(&pseudo_dev);
return err;
}
for (i = 0; i < num; i++) {
struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s"IXP_POSTFIX, cra->cra_name) >=
CRYPTO_MAX_ALG_NAME)
{
continue;
}
if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
continue;
}
if (!ixp4xx_algos[i].hash) {
/* block ciphers */
cra->cra_type = &crypto_ablkcipher_type;
cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC;
if (!cra->cra_ablkcipher.setkey)
cra->cra_ablkcipher.setkey = ablk_setkey;
if (!cra->cra_ablkcipher.encrypt)
cra->cra_ablkcipher.encrypt = ablk_encrypt;
if (!cra->cra_ablkcipher.decrypt)
cra->cra_ablkcipher.decrypt = ablk_decrypt;
cra->cra_init = init_tfm_ablk;
} else {
/* authenc */
cra->cra_type = &crypto_aead_type;
cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
CRYPTO_ALG_ASYNC;
cra->cra_aead.setkey = aead_setkey;
cra->cra_aead.setauthsize = aead_setauthsize;
cra->cra_aead.encrypt = aead_encrypt;
cra->cra_aead.decrypt = aead_decrypt;
cra->cra_aead.givencrypt = aead_givencrypt;
cra->cra_init = init_tfm_aead;
}
cra->cra_ctxsize = sizeof(struct ixp_ctx);
cra->cra_module = THIS_MODULE;
cra->cra_alignmask = 3;
cra->cra_priority = 300;
cra->cra_exit = exit_tfm;
if (crypto_register_alg(cra))
printk(KERN_ERR "Failed to register '%s'\n",
cra->cra_name);
else
ixp4xx_algos[i].registered = 1;
}
return 0;
}
static void __exit ixp_module_exit(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
for (i = 0; i < num; i++) {
if (ixp4xx_algos[i].registered)
crypto_unregister_alg(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto();
platform_device_unregister(&pseudo_dev);
}
module_init(ixp_module_init);
module_exit(ixp_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");
......@@ -385,12 +385,12 @@ static int __init padlock_init(void)
int ret;
if (!cpu_has_xcrypt) {
printk(KERN_ERR PFX "VIA PadLock not detected.\n");
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
......
......@@ -254,12 +254,12 @@ static int __init padlock_init(void)
int rc = -ENODEV;
if (!cpu_has_phe) {
printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
return -ENODEV;
}
if (!cpu_has_phe_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
......
/*
* talitos - Freescale Integrated Security Engine (SEC) device driver
*
* Copyright (c) 2008 Freescale Semiconductor, Inc.
*
* Scatterlist Crypto API glue code copied from files with the following:
* Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
*
* Crypto algorithm registration code copied from hifn driver:
* 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include "talitos.h"
#define TALITOS_TIMEOUT 100000
#define TALITOS_MAX_DATA_LEN 65535
#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
/* descriptor pointer entry */
struct talitos_ptr {
__be16 len; /* length */
u8 j_extent; /* jump to sg link table and/or extent */
u8 eptr; /* extended address */
__be32 ptr; /* address */
};
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
__be32 hdr_lo; /* header low bits */
struct talitos_ptr ptr[7]; /* ptr/len pair array */
};
/**
* talitos_request - descriptor submission request
* @desc: descriptor pointer (kernel virtual)
* @dma_desc: descriptor's physical bus address
* @callback: whom to call when descriptor processing is done
* @context: caller context (optional)
*/
struct talitos_request {
struct talitos_desc *desc;
dma_addr_t dma_desc;
void (*callback) (struct device *dev, struct talitos_desc *desc,
void *context, int error);
void *context;
};
struct talitos_private {
struct device *dev;
struct of_device *ofdev;
void __iomem *reg;
int irq;
/* SEC version geometry (from device tree node) */
unsigned int num_channels;
unsigned int chfifo_len;
unsigned int exec_units;
unsigned int desc_types;
/* next channel to be assigned the next incoming descriptor */
atomic_t last_chan;
/* per-channel request fifo */
struct talitos_request **fifo;
/*
* length of the request fifo
* fifo_len is chfifo_len rounded up to next power of 2
* so we can use bitwise ops to wrap
*/
unsigned int fifo_len;
/* per-channel index to next free descriptor request */
int *head;
/* per-channel index to next in-progress/done descriptor request */
int *tail;
/* per-channel request submission (head) and release (tail) locks */
spinlock_t *head_lock;
spinlock_t *tail_lock;
/* request callback tasklet */
struct tasklet_struct done_task;
struct tasklet_struct error_task;
/* list of registered algorithms */
struct list_head alg_list;
/* hwrng device */
struct hwrng rng;
};
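/*
 * Example of the power-of-two sizing: a channel fifo length of 24 from
 * the device tree becomes fifo_len = 32, so head/tail advance with
 * "(i + 1) & (fifo_len - 1)" instead of a modulo operation.
 */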
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
static void map_single_talitos_ptr(struct device *dev,
struct talitos_ptr *talitos_ptr,
unsigned short len, void *data,
unsigned char extent,
enum dma_data_direction dir)
{
talitos_ptr->len = cpu_to_be16(len);
talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
talitos_ptr->j_extent = extent;
}
/*
* unmap bus single (contiguous) h/w descriptor pointer
*/
static void unmap_single_talitos_ptr(struct device *dev,
struct talitos_ptr *talitos_ptr,
enum dma_data_direction dir)
{
dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
be16_to_cpu(talitos_ptr->len), dir);
}
static int reset_channel(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
&& --timeout)
cpu_relax();
if (timeout == 0) {
dev_err(dev, "failed to reset channel %d\n", ch);
return -EIO;
}
/* set done writeback and IRQ */
setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
TALITOS_CCCR_LO_CDIE);
return 0;
}
static int reset_device(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
&& --timeout)
cpu_relax();
if (timeout == 0) {
dev_err(dev, "failed to reset device\n");
return -EIO;
}
return 0;
}
/*
* Reset and initialize the device
*/
static int init_device(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int ch, err;
/*
 * Master reset.
 * Per the errata documentation, certain SEC interrupts are not
 * fully cleared by writing the MCR:SWR bit once; set the bit
 * twice to completely reset the device.
 */
err = reset_device(dev);
if (err)
return err;
err = reset_device(dev);
if (err)
return err;
/* reset channels */
for (ch = 0; ch < priv->num_channels; ch++) {
err = reset_channel(dev, ch);
if (err)
return err;
}
/* enable channel done and error interrupts */
setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
return 0;
}
/**
* talitos_submit - submits a descriptor to the device for processing
* @dev: the SEC device to be used
* @desc: the descriptor to be processed by the device
* @callback: whom to call when processing is complete
* @context: a handle for use by caller (optional)
*
* desc must contain valid dma-mapped (bus physical) address pointers.
* callback must check err and feedback in descriptor header
* for device processing status.
*/
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error),
void *context)
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
unsigned long flags, ch;
int head;
/* select done notification */
desc->hdr |= DESC_HDR_DONE_NOTIFY;
/* emulate SEC's round-robin channel fifo polling scheme */
ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
spin_lock_irqsave(&priv->head_lock[ch], flags);
head = priv->head[ch];
request = &priv->fifo[ch][head];
if (request->desc) {
/* request queue is full */
spin_unlock_irqrestore(&priv->head_lock[ch], flags);
return -EAGAIN;
}
/* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
DMA_BIDIRECTIONAL);
request->callback = callback;
request->context = context;
/* increment fifo head */
priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
smp_wmb();
request->desc = desc;
/* GO! */
wmb();
out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
spin_unlock_irqrestore(&priv->head_lock[ch], flags);
return -EINPROGRESS;
}
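/*
 * Callers treat -EINPROGRESS as "queued" and -EAGAIN as "channel fifo
 * full, back off and resubmit"; completion or an error status is
 * delivered later through the callback, invoked from flush_channel().
 */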
/*
* process what was done, notify callback of error if not
*/
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request, saved_req;
unsigned long flags;
int tail, status;
spin_lock_irqsave(&priv->tail_lock[ch], flags);
tail = priv->tail[ch];
while (priv->fifo[ch][tail].desc) {
request = &priv->fifo[ch][tail];
/* descriptors with their done bits set don't get the error */
rmb();
if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
status = 0;
else
if (!error)
break;
else
status = error;
dma_unmap_single(dev, request->dma_desc,
sizeof(struct talitos_desc), DMA_BIDIRECTIONAL);
/* copy entries so we can call callback outside lock */
saved_req.desc = request->desc;
saved_req.callback = request->callback;
saved_req.context = request->context;
/* release request entry in fifo */
smp_wmb();
request->desc = NULL;
/* increment fifo tail */
priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
saved_req.callback(dev, saved_req.desc, saved_req.context,
status);
/* channel may resume processing in single desc error case */
if (error && !reset_ch && status == error)
return;
spin_lock_irqsave(&priv->tail_lock[ch], flags);
tail = priv->tail[ch];
}
spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
}
/*
* process completed requests for channels that have done status
*/
static void talitos_done(unsigned long data)
{
struct device *dev = (struct device *)data;
struct talitos_private *priv = dev_get_drvdata(dev);
int ch;
for (ch = 0; ch < priv->num_channels; ch++)
flush_channel(dev, ch, 0, 0);
}
/*
* locate current (offending) descriptor
*/
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->tail[ch];
dma_addr_t cur_desc;
cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
while (priv->fifo[ch][tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->tail[ch]) {
dev_err(dev, "couldn't locate current descriptor\n");
return NULL;
}
}
return priv->fifo[ch][tail].desc;
}
/*
* user diagnostics; report root cause of error based on execution unit status
*/
static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
switch (desc->hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
dev_err(dev, "AFEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_AFEUISR),
in_be32(priv->reg + TALITOS_AFEUISR_LO));
break;
case DESC_HDR_SEL0_DEU:
dev_err(dev, "DEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_DEUISR),
in_be32(priv->reg + TALITOS_DEUISR_LO));
break;
case DESC_HDR_SEL0_MDEUA:
case DESC_HDR_SEL0_MDEUB:
dev_err(dev, "MDEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_MDEUISR),
in_be32(priv->reg + TALITOS_MDEUISR_LO));
break;
case DESC_HDR_SEL0_RNG:
dev_err(dev, "RNGUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_RNGUISR),
in_be32(priv->reg + TALITOS_RNGUISR_LO));
break;
case DESC_HDR_SEL0_PKEU:
dev_err(dev, "PKEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_PKEUISR),
in_be32(priv->reg + TALITOS_PKEUISR_LO));
break;
case DESC_HDR_SEL0_AESU:
dev_err(dev, "AESUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_AESUISR),
in_be32(priv->reg + TALITOS_AESUISR_LO));
break;
case DESC_HDR_SEL0_CRCU:
dev_err(dev, "CRCUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_CRCUISR),
in_be32(priv->reg + TALITOS_CRCUISR_LO));
break;
case DESC_HDR_SEL0_KEU:
dev_err(dev, "KEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_KEUISR),
in_be32(priv->reg + TALITOS_KEUISR_LO));
break;
}
switch (desc->hdr & DESC_HDR_SEL1_MASK) {
case DESC_HDR_SEL1_MDEUA:
case DESC_HDR_SEL1_MDEUB:
dev_err(dev, "MDEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_MDEUISR),
in_be32(priv->reg + TALITOS_MDEUISR_LO));
break;
case DESC_HDR_SEL1_CRCU:
dev_err(dev, "CRCUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_CRCUISR),
in_be32(priv->reg + TALITOS_CRCUISR_LO));
break;
}
for (i = 0; i < 8; i++)
dev_err(dev, "DESCBUF 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
}
/*
* recover from error interrupts
*/
static void talitos_error(unsigned long data)
{
struct device *dev = (struct device *)data;
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
int ch, error, reset_dev = 0, reset_ch = 0;
u32 isr, isr_lo, v, v_lo;
isr = in_be32(priv->reg + TALITOS_ISR);
isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
for (ch = 0; ch < priv->num_channels; ch++) {
/* skip channels without errors */
if (!(isr & (1 << (ch * 2 + 1))))
continue;
error = -EINVAL;
v = in_be32(priv->reg + TALITOS_CCPSR(ch));
v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
if (v_lo & TALITOS_CCPSR_LO_DOF) {
dev_err(dev, "double fetch fifo overflow error\n");
error = -EAGAIN;
reset_ch = 1;
}
if (v_lo & TALITOS_CCPSR_LO_SOF) {
/* h/w dropped descriptor */
dev_err(dev, "single fetch fifo overflow error\n");
error = -EAGAIN;
}
if (v_lo & TALITOS_CCPSR_LO_MDTE)
dev_err(dev, "master data transfer error\n");
if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
dev_err(dev, "s/g data length zero error\n");
if (v_lo & TALITOS_CCPSR_LO_FPZ)
dev_err(dev, "fetch pointer zero error\n");
if (v_lo & TALITOS_CCPSR_LO_IDH)
dev_err(dev, "illegal descriptor header error\n");
if (v_lo & TALITOS_CCPSR_LO_IEU)
dev_err(dev, "invalid execution unit error\n");
if (v_lo & TALITOS_CCPSR_LO_EU)
report_eu_error(dev, ch, current_desc(dev, ch));
if (v_lo & TALITOS_CCPSR_LO_GB)
dev_err(dev, "gather boundary error\n");
if (v_lo & TALITOS_CCPSR_LO_GRL)
dev_err(dev, "gather return/length error\n");
if (v_lo & TALITOS_CCPSR_LO_SB)
dev_err(dev, "scatter boundary error\n");
if (v_lo & TALITOS_CCPSR_LO_SRL)
dev_err(dev, "scatter return/length error\n");
flush_channel(dev, ch, error, reset_ch);
if (reset_ch) {
reset_channel(dev, ch);
} else {
setbits32(priv->reg + TALITOS_CCCR(ch),
TALITOS_CCCR_CONT);
setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
TALITOS_CCCR_CONT) && --timeout)
cpu_relax();
if (timeout == 0) {
dev_err(dev, "failed to restart channel %d\n",
ch);
reset_dev = 1;
}
}
}
if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
dev_err(dev, "done overflow, internal time out, or rngu error: "
"ISR 0x%08x_%08x\n", isr, isr_lo);
/* purge request queues */
for (ch = 0; ch < priv->num_channels; ch++)
flush_channel(dev, ch, -EIO, 1);
/* reset and reinitialize the device */
init_device(dev);
}
}
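/*
 * Interrupt handler: read and acknowledge the status registers, run
 * error handling directly for error bits, and defer normal completions
 * to the done_task tasklet.
 */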
static irqreturn_t talitos_interrupt(int irq, void *data)
{
struct device *dev = data;
struct talitos_private *priv = dev_get_drvdata(dev);
u32 isr, isr_lo;
isr = in_be32(priv->reg + TALITOS_ISR);
isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
/* ack */
out_be32(priv->reg + TALITOS_ICR, isr);
out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
talitos_error((unsigned long)data);
else
if (likely(isr & TALITOS_ISR_CHDONE))
tasklet_schedule(&priv->done_task);
return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
}
/*
* hwrng
*/
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
struct device *dev = (struct device *)rng->priv;
struct talitos_private *priv = dev_get_drvdata(dev);
u32 ofl;
int i;
for (i = 0; i < 20; i++) {
ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
TALITOS_RNGUSR_LO_OFL;
if (ofl || !wait)
break;
udelay(10);
}
return !!ofl;
}
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
struct device *dev = (struct device *)rng->priv;
struct talitos_private *priv = dev_get_drvdata(dev);
/* rng fifo requires 64-bit accesses: read both halves, return the low word */
*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
return sizeof(u32);
}
static int talitos_rng_init(struct hwrng *rng)
{
struct device *dev = (struct device *)rng->priv;
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
&& --timeout)
cpu_relax();
if (timeout == 0) {
dev_err(dev, "failed to reset rng hw\n");
return -ENODEV;
}
/* start generating */
setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
return 0;
}
static int talitos_register_rng(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
priv->rng.name = dev_driver_string(dev);
priv->rng.init = talitos_rng_init;
priv->rng.data_present = talitos_rng_data_present;
priv->rng.data_read = talitos_rng_data_read;
priv->rng.priv = (unsigned long)dev;
return hwrng_register(&priv->rng);
}
static void talitos_unregister_rng(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
hwrng_unregister(&priv->rng);
}
/*
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
#define TALITOS_MAX_KEY_SIZE 64
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define MD5_DIGEST_SIZE 16
struct talitos_ctx {
struct device *dev;
__be32 desc_hdr_template;
u8 key[TALITOS_MAX_KEY_SIZE];
u8 iv[TALITOS_MAX_IV_LENGTH];
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
unsigned int authsize;
};
static int aead_authenc_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
ctx->authsize = authsize;
return 0;
}
static int aead_authenc_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
unsigned int authkeylen;
unsigned int enckeylen;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
authkeylen = keylen - enckeylen;
if (keylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
ctx->enckeylen = enckeylen;
ctx->authkeylen = authkeylen;
return 0;
badkey:
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
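/*
 * The auth and cipher keys stay concatenated in ctx->key; ipsec_esp()
 * later points desc->ptr[0] at the first authkeylen bytes and
 * desc->ptr[3] at the remaining enckeylen bytes.
 */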
/*
* ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @dma_len: length of dma mapped link_tbl space
* @dma_link_tbl: bus physical address of link_tbl
* @desc: h/w descriptor
* @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
*
* if decrypting (with authcheck), or either one of src_nents or dst_nents
* is greater than 1, an integrity check value is concatenated to the end
* of link_tbl data
*/
struct ipsec_esp_edesc {
int src_nents;
int dst_nents;
int dma_len;
dma_addr_t dma_link_tbl;
struct talitos_desc desc;
struct talitos_ptr link_tbl[0];
};
static void ipsec_esp_unmap(struct device *dev,
struct ipsec_esp_edesc *edesc,
struct aead_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
if (areq->src != areq->dst) {
dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
DMA_TO_DEVICE);
dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
DMA_BIDIRECTIONAL);
}
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
}
/*
* ipsec_esp descriptor callbacks
*/
static void ipsec_esp_encrypt_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
struct aead_request *areq = context;
struct ipsec_esp_edesc *edesc =
container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct scatterlist *sg;
void *icvdata;
ipsec_esp_unmap(dev, edesc, areq);
/* copy the generated ICV to dst */
if (edesc->dma_len) {
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 1];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
icvdata, ctx->authsize);
}
kfree(edesc);
aead_request_complete(areq, err);
}
static void ipsec_esp_decrypt_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
struct aead_request *req = context;
struct ipsec_esp_edesc *edesc =
container_of(desc, struct ipsec_esp_edesc, desc);
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct scatterlist *sg;
void *icvdata;
ipsec_esp_unmap(dev, edesc, req);
if (!err) {
/* auth check */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 1];
else
icvdata = &edesc->link_tbl[0];
sg = sg_last(req->dst, edesc->dst_nents ? : 1);
err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
}
kfree(edesc);
aead_request_complete(req, err);
}
/*
* convert scatterlist to SEC h/w link table format
* stop at cryptlen bytes
*/
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
int n_sg = sg_count;
while (n_sg--) {
link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
cryptlen -= sg_dma_len(sg);
sg = sg_next(sg);
}
/* adjust (decrease) the last entry (or entries) so the total length equals cryptlen */
link_tbl_ptr--;
while (link_tbl_ptr->len <= (-cryptlen)) {
/* Empty this entry, and move to previous one */
cryptlen += be16_to_cpu(link_tbl_ptr->len);
link_tbl_ptr->len = 0;
sg_count--;
link_tbl_ptr--;
}
link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
+ cryptlen);
/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
return sg_count;
}
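/*
 * For example, two 1500-byte segments with cryptlen == 2000 produce
 * link table entries of 1500 and 1500 bytes; the trimming loop then
 * shortens the last entry to 500 bytes so the table covers exactly
 * cryptlen before it is tagged with DESC_PTR_LNKTBL_RETURN.
 */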
/*
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
u8 *giv, u64 seq,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize;
unsigned int ivsize;
int sg_count;
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE);
/* hmac data */
map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
DMA_TO_DEVICE);
/* cipher iv */
ivsize = crypto_aead_ivsize(aead);
map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
DMA_TO_DEVICE);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
(char *)&ctx->key + ctx->authkeylen, 0,
DMA_TO_DEVICE);
/*
* cipher in
* map and adjust cipher len to aead request cryptlen.
* extent is bytes of HMAC appended to ciphertext,
* typically 12 for ipsec
*/
desc->ptr[4].len = cpu_to_be16(cryptlen);
desc->ptr[4].j_extent = authsize;
if (areq->src == areq->dst)
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
DMA_BIDIRECTIONAL);
else
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
DMA_TO_DEVICE);
if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
} else {
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
}
}
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
desc->ptr[5].j_extent = authsize;
if (areq->src != areq->dst) {
sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
DMA_FROM_DEVICE);
}
if (sg_count == 1) {
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents];
struct scatterlist *sg;
desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
edesc->src_nents);
if (areq->src == areq->dst) {
memcpy(link_tbl_ptr, &edesc->link_tbl[0],
edesc->src_nents * sizeof(struct talitos_ptr));
} else {
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
}
link_tbl_ptr += sg_count - 1;
/* handle case where sg_last contains the ICV exclusively */
sg = sg_last(areq->dst, edesc->dst_nents);
if (sg->length == ctx->authsize)
link_tbl_ptr--;
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
link_tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */
link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
edesc->src_nents +
edesc->dst_nents + 1);
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
}
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
DMA_FROM_DEVICE);
return talitos_submit(dev, desc, callback, areq);
}
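/*
 * Descriptor pointer usage for the ipsec_esp type, as filled in above:
 *	ptr[0] HMAC key		ptr[1] HMAC (assoc) data	ptr[2] IV in
 *	ptr[3] cipher key	ptr[4] cipher in		ptr[5] cipher out
 *	ptr[6] IV out
 */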
/*
* derive number of elements in scatterlist
*/
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
while (nbytes) {
sg_nents++;
nbytes -= sg->length;
sg = sg_next(sg);
}
return sg_nents;
}
/*
* allocate and map the ipsec_esp extended descriptor
*/
static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
int icv_stashing)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct ipsec_esp_edesc *edesc;
int src_nents, dst_nents, alloc_len, dma_len;
if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
return ERR_PTR(-EINVAL);
}
src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize);
src_nents = (src_nents == 1) ? 0 : src_nents;
if (areq->dst == areq->src) {
dst_nents = src_nents;
} else {
dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
dst_nents = (dst_nents == 1) ? 0 : dst_nents;
}
/*
* allocate space for base edesc plus the link tables,
* allowing for a separate entry for the generated ICV (+ 1),
* and the ICV data itself
*/
alloc_len = sizeof(struct ipsec_esp_edesc);
if (src_nents || dst_nents) {
dma_len = (src_nents + dst_nents + 1) *
sizeof(struct talitos_ptr) + ctx->authsize;
alloc_len += dma_len;
} else {
dma_len = 0;
alloc_len += icv_stashing ? ctx->authsize : 0;
}
edesc = kmalloc(alloc_len, GFP_DMA);
if (!edesc) {
dev_err(ctx->dev, "could not allocate edescriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->dma_len = dma_len;
edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0],
edesc->dma_len, DMA_BIDIRECTIONAL);
return edesc;
}
static int aead_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct ipsec_esp_edesc *edesc;
/* allocate extended descriptor */
edesc = ipsec_esp_edesc_alloc(req, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
}
static int aead_authenc_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int authsize = ctx->authsize;
struct ipsec_esp_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
req->cryptlen -= authsize;
/* allocate extended descriptor */
edesc = ipsec_esp_edesc_alloc(req, 1);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* stash incoming ICV for later cmp with ICV generated by the h/w */
if (edesc->dma_len)
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 1];
else
icvdata = &edesc->link_tbl[0];
sg = sg_last(req->src, edesc->src_nents ? : 1);
memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
ctx->authsize);
/* decrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
}
static int aead_authenc_givencrypt(
struct aead_givcrypt_request *req)
{
struct aead_request *areq = &req->areq;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct ipsec_esp_edesc *edesc;
/* allocate extended descriptor */
edesc = ipsec_esp_edesc_alloc(areq, 0);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
return ipsec_esp(edesc, areq, req->giv, req->seq,
ipsec_esp_encrypt_done);
}
struct talitos_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
struct aead_alg aead;
struct device *dev;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
.blocksize = AES_BLOCK_SIZE,
.aead = {
.setkey = aead_authenc_setkey,
.setauthsize = aead_authenc_setauthsize,
.encrypt = aead_authenc_encrypt,
.decrypt = aead_authenc_decrypt,
.givencrypt = aead_authenc_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
DESC_HDR_SEL1_MDEUA |
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
.blocksize = DES3_EDE_BLOCK_SIZE,
.aead = {
.setkey = aead_authenc_setkey,
.setauthsize = aead_authenc_setauthsize,
.encrypt = aead_authenc_encrypt,
.decrypt = aead_authenc_decrypt,
.givencrypt = aead_authenc_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES |
DESC_HDR_SEL1_MDEUA |
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
.blocksize = AES_BLOCK_SIZE,
.aead = {
.setkey = aead_authenc_setkey,
.setauthsize = aead_authenc_setauthsize,
.encrypt = aead_authenc_encrypt,
.decrypt = aead_authenc_decrypt,
.givencrypt = aead_authenc_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
DESC_HDR_SEL1_MDEUA |
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
.blocksize = DES3_EDE_BLOCK_SIZE,
.aead = {
.setkey = aead_authenc_setkey,
.setauthsize = aead_authenc_setauthsize,
.encrypt = aead_authenc_encrypt,
.decrypt = aead_authenc_decrypt,
.givencrypt = aead_authenc_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES |
DESC_HDR_SEL1_MDEUA |
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.blocksize = AES_BLOCK_SIZE,
.aead = {
.setkey = aead_authenc_setkey,
.setauthsize = aead_authenc_setauthsize,
.encrypt = aead_authenc_encrypt,
.decrypt = aead_authenc_decrypt,
.givencrypt = aead_authenc_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC |
DESC_HDR_SEL1_MDEUA |
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{
.name = "authenc(hmac(md5),cbc(des3_ede))",
.driver_name = "authenc-hmac-md5-cbc-3des-talitos",
.blocksize = DES3_EDE_BLOCK_SIZE,
.aead = {
.setkey = aead_authenc_setkey,
.setauthsize = aead_authenc_setauthsize,
.encrypt = aead_authenc_encrypt,
.decrypt = aead_authenc_decrypt,
.givencrypt = aead_authenc_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES |
DESC_HDR_SEL1_MDEUA |
DESC_HDR_MODE1_MDEU_INIT |
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
}
};
struct talitos_crypto_alg {
struct list_head entry;
struct device *dev;
__be32 desc_hdr_template;
struct crypto_alg crypto_alg;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
struct talitos_crypto_alg *talitos_alg =
container_of(alg, struct talitos_crypto_alg, crypto_alg);
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
/* copy descriptor header template value */
ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
/* random first IV */
get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
return 0;
}
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
* capabilities description provided in the device tree node.
*/
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int ret;
ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
(1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
if (SECONDARY_EU(desc_hdr_template))
ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
& priv->exec_units);
return ret;
}
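/*
 * For example, the ipsec_esp templates above use descriptor type 1
 * (DESC_HDR_TYPE_IPSEC_ESP), primary EU 6 (AESU) or 2 (DEU) and
 * secondary EU 3 (MDEUA), so registering them requires bit 1 of
 * "fsl,descriptor-types-mask" and the corresponding bits of
 * "fsl,exec-units-mask" to be set in the device tree node.
 */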
static int __devexit talitos_remove(struct of_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_crypto_alg *t_alg, *n;
int i;
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
kfree(priv->tail);
kfree(priv->head);
if (priv->fifo)
for (i = 0; i < priv->num_channels; i++)
kfree(priv->fifo[i]);
kfree(priv->fifo);
kfree(priv->head_lock);
kfree(priv->tail_lock);
if (priv->irq != NO_IRQ) {
free_irq(priv->irq, dev);
irq_dispose_mapping(priv->irq);
}
tasklet_kill(&priv->done_task);
tasklet_kill(&priv->error_task);
iounmap(priv->reg);
dev_set_drvdata(dev, NULL);
kfree(priv);
return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_alg_template
*template)
{
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
alg = &t_alg->crypto_alg;
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
alg->cra_module = THIS_MODULE;
alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_u.aead = template->aead;
t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
return t_alg;
}
static int talitos_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->node;
struct talitos_private *priv;
const unsigned int *prop;
int i, err;
priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_set_drvdata(dev, priv);
priv->ofdev = ofdev;
tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
priv->irq = irq_of_parse_and_map(np, 0);
if (priv->irq == NO_IRQ) {
dev_err(dev, "failed to map irq\n");
err = -EINVAL;
goto err_out;
}
/* get the irq line */
err = request_irq(priv->irq, talitos_interrupt, 0,
dev_driver_string(dev), dev);
if (err) {
dev_err(dev, "failed to request irq %d\n", priv->irq);
irq_dispose_mapping(priv->irq);
priv->irq = NO_IRQ;
goto err_out;
}
priv->reg = of_iomap(np, 0);
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
err = -ENOMEM;
goto err_out;
}
/* get SEC version capabilities from device tree */
prop = of_get_property(np, "fsl,num-channels", NULL);
if (prop)
priv->num_channels = *prop;
prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
if (prop)
priv->chfifo_len = *prop;
prop = of_get_property(np, "fsl,exec-units-mask", NULL);
if (prop)
priv->exec_units = *prop;
prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
if (prop)
priv->desc_types = *prop;
if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
!priv->exec_units || !priv->desc_types) {
dev_err(dev, "invalid property data in device tree node\n");
err = -EINVAL;
goto err_out;
}
of_node_put(np);
np = NULL;
priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
GFP_KERNEL);
priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
GFP_KERNEL);
if (!priv->head_lock || !priv->tail_lock) {
dev_err(dev, "failed to allocate fifo locks\n");
err = -ENOMEM;
goto err_out;
}
for (i = 0; i < priv->num_channels; i++) {
spin_lock_init(&priv->head_lock[i]);
spin_lock_init(&priv->tail_lock[i]);
}
priv->fifo = kmalloc(sizeof(struct talitos_request *) *
priv->num_channels, GFP_KERNEL);
if (!priv->fifo) {
dev_err(dev, "failed to allocate request fifo\n");
err = -ENOMEM;
goto err_out;
}
priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
for (i = 0; i < priv->num_channels; i++) {
priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
priv->fifo_len, GFP_KERNEL);
if (!priv->fifo[i]) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
goto err_out;
}
}
priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
if (!priv->head || !priv->tail) {
dev_err(dev, "failed to allocate request index space\n");
err = -ENOMEM;
goto err_out;
}
/* reset and initialize the h/w */
err = init_device(dev);
if (err) {
dev_err(dev, "failed to initialize device\n");
goto err_out;
}
/* register the RNG, if available */
if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
err = talitos_register_rng(dev);
if (err) {
dev_err(dev, "failed to register hwrng: %d\n", err);
goto err_out;
} else
dev_info(dev, "hwrng\n");
}
/* register crypto algorithms the device supports */
INIT_LIST_HEAD(&priv->alg_list);
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
goto err_out;
}
err = crypto_register_alg(&t_alg->crypto_alg);
if (err) {
dev_err(dev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else {
list_add_tail(&t_alg->entry, &priv->alg_list);
dev_info(dev, "%s\n",
t_alg->crypto_alg.cra_driver_name);
}
}
}
return 0;
err_out:
talitos_remove(ofdev);
if (np)
of_node_put(np);
return err;
}
static struct of_device_id talitos_match[] = {
{
.compatible = "fsl,sec2.0",
},
{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
static struct of_platform_driver talitos_driver = {
.name = "talitos",
.match_table = talitos_match,
.probe = talitos_probe,
.remove = __devexit_p(talitos_remove),
};
static int __init talitos_init(void)
{
return of_register_platform_driver(&talitos_driver);
}
module_init(talitos_init);
static void __exit talitos_exit(void)
{
of_unregister_platform_driver(&talitos_driver);
}
module_exit(talitos_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
* Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
*/
/* global register offset addresses */
#define TALITOS_MCR 0x1030 /* master control register */
#define TALITOS_MCR_LO 0x1038
#define TALITOS_MCR_SWR 0x1 /* s/w reset */
#define TALITOS_IMR 0x1008 /* interrupt mask register */
#define TALITOS_IMR_INIT 0x10fff /* enable channel IRQs */
#define TALITOS_IMR_LO 0x100C
#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
#define TALITOS_ISR 0x1010 /* interrupt status register */
#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
#define TALITOS_ISR_LO 0x1014
#define TALITOS_ICR 0x1018 /* interrupt clear register */
#define TALITOS_ICR_LO 0x101C
/* channel register address stride */
#define TALITOS_CH_STRIDE 0x100
/* channel configuration register */
#define TALITOS_CCCR(ch) (ch * TALITOS_CH_STRIDE + 0x1108)
#define TALITOS_CCCR_CONT 0x2 /* channel continue */
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
/* CCPSR: channel pointer status register */
#define TALITOS_CCPSR(ch) (ch * TALITOS_CH_STRIDE + 0x1110)
#define TALITOS_CCPSR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1114)
#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
#define TALITOS_CCPSR_LO_SGDLZ 0x1000 /* s/g data len zero error */
#define TALITOS_CCPSR_LO_FPZ 0x0800 /* fetch ptr zero error */
#define TALITOS_CCPSR_LO_IDH 0x0400 /* illegal desc hdr error */
#define TALITOS_CCPSR_LO_IEU 0x0200 /* invalid EU error */
#define TALITOS_CCPSR_LO_EU 0x0100 /* EU error detected */
#define TALITOS_CCPSR_LO_GB 0x0080 /* gather boundary error */
#define TALITOS_CCPSR_LO_GRL 0x0040 /* gather return/length error */
#define TALITOS_CCPSR_LO_SB 0x0020 /* scatter boundary error */
#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
/* channel fetch fifo register */
#define TALITOS_FF(ch) (ch * TALITOS_CH_STRIDE + 0x1148)
#define TALITOS_FF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x114c)
/* current descriptor pointer register */
#define TALITOS_CDPR(ch) (ch * TALITOS_CH_STRIDE + 0x1140)
#define TALITOS_CDPR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1144)
/* descriptor buffer register */
#define TALITOS_DESCBUF(ch) (ch * TALITOS_CH_STRIDE + 0x1180)
#define TALITOS_DESCBUF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1184)
/* gather link table */
#define TALITOS_GATHER(ch) (ch * TALITOS_CH_STRIDE + 0x11c0)
#define TALITOS_GATHER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11c4)
/* scatter link table */
#define TALITOS_SCATTER(ch) (ch * TALITOS_CH_STRIDE + 0x11e0)
#define TALITOS_SCATTER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11e4)
/* execution unit interrupt status registers */
#define TALITOS_DEUISR 0x2030 /* DES unit */
#define TALITOS_DEUISR_LO 0x2034
#define TALITOS_AESUISR 0x4030 /* AES unit */
#define TALITOS_AESUISR_LO 0x4034
#define TALITOS_MDEUISR 0x6030 /* message digest unit */
#define TALITOS_MDEUISR_LO 0x6034
#define TALITOS_AFEUISR 0x8030 /* arc4 unit */
#define TALITOS_AFEUISR_LO 0x8034
#define TALITOS_RNGUISR 0xa030 /* random number unit */
#define TALITOS_RNGUISR_LO 0xa034
#define TALITOS_RNGUSR 0xa028 /* rng status */
#define TALITOS_RNGUSR_LO 0xa02c
#define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */
#define TALITOS_RNGUSR_LO_OFL 0xff0000 /* output FIFO length */
#define TALITOS_RNGUDSR 0xa010 /* data size */
#define TALITOS_RNGUDSR_LO 0xa014
#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */
#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */
#define TALITOS_RNGURCR 0xa018 /* reset control */
#define TALITOS_RNGURCR_LO 0xa01c
#define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */
#define TALITOS_PKEUISR 0xc030 /* public key unit */
#define TALITOS_PKEUISR_LO 0xc034
#define TALITOS_KEUISR 0xe030 /* kasumi unit */
#define TALITOS_KEUISR_LO 0xe034
#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
#define TALITOS_CRCUISR_LO 0xf034
/*
* talitos descriptor header (hdr) bits
*/
/* written back when done */
#define DESC_HDR_DONE __constant_cpu_to_be32(0xff000000)
/* primary execution unit select */
#define DESC_HDR_SEL0_MASK __constant_cpu_to_be32(0xf0000000)
#define DESC_HDR_SEL0_AFEU __constant_cpu_to_be32(0x10000000)
#define DESC_HDR_SEL0_DEU __constant_cpu_to_be32(0x20000000)
#define DESC_HDR_SEL0_MDEUA __constant_cpu_to_be32(0x30000000)
#define DESC_HDR_SEL0_MDEUB __constant_cpu_to_be32(0xb0000000)
#define DESC_HDR_SEL0_RNG __constant_cpu_to_be32(0x40000000)
#define DESC_HDR_SEL0_PKEU __constant_cpu_to_be32(0x50000000)
#define DESC_HDR_SEL0_AESU __constant_cpu_to_be32(0x60000000)
#define DESC_HDR_SEL0_KEU __constant_cpu_to_be32(0x70000000)
#define DESC_HDR_SEL0_CRCU __constant_cpu_to_be32(0x80000000)
/* primary execution unit mode (MODE0) and derivatives */
#define DESC_HDR_MODE0_ENCRYPT __constant_cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_AESU_CBC __constant_cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_DEU_CBC __constant_cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_DEU_3DES __constant_cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_INIT __constant_cpu_to_be32(0x01000000)
#define DESC_HDR_MODE0_MDEU_HMAC __constant_cpu_to_be32(0x00800000)
#define DESC_HDR_MODE0_MDEU_PAD __constant_cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_MDEU_MD5 __constant_cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_SHA256 __constant_cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA1_HMAC (DESC_HDR_MODE0_MDEU_SHA1 | \
DESC_HDR_MODE0_MDEU_HMAC)
/* secondary execution unit select (SEL1) */
#define DESC_HDR_SEL1_MASK __constant_cpu_to_be32(0x000f0000)
#define DESC_HDR_SEL1_MDEUA __constant_cpu_to_be32(0x00030000)
#define DESC_HDR_SEL1_MDEUB __constant_cpu_to_be32(0x000b0000)
#define DESC_HDR_SEL1_CRCU __constant_cpu_to_be32(0x00080000)
/* secondary execution unit mode (MODE1) and derivatives */
#define DESC_HDR_MODE1_MDEU_INIT __constant_cpu_to_be32(0x00001000)
#define DESC_HDR_MODE1_MDEU_HMAC __constant_cpu_to_be32(0x00000800)
#define DESC_HDR_MODE1_MDEU_PAD __constant_cpu_to_be32(0x00000400)
#define DESC_HDR_MODE1_MDEU_MD5 __constant_cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_SHA256 __constant_cpu_to_be32(0x00000100)
#define DESC_HDR_MODE1_MDEU_SHA1 __constant_cpu_to_be32(0x00000000)
#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \
DESC_HDR_MODE1_MDEU_HMAC)
/* direction of overall data flow (DIR) */
#define DESC_HDR_DIR_INBOUND __constant_cpu_to_be32(0x00000002)
/* request done notification (DN) */
#define DESC_HDR_DONE_NOTIFY __constant_cpu_to_be32(0x00000001)
/* descriptor types */
#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP __constant_cpu_to_be32(0 << 3)
#define DESC_HDR_TYPE_IPSEC_ESP __constant_cpu_to_be32(1 << 3)
#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU __constant_cpu_to_be32(2 << 3)
#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU __constant_cpu_to_be32(4 << 3)
/* link table extent field bits */
#define DESC_PTR_LNKTBL_JUMP 0x80
#define DESC_PTR_LNKTBL_RETURN 0x02
#define DESC_PTR_LNKTBL_NEXT 0x01
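These header bits are OR-ed together into the 32-bit descriptor header word that a channel fetches. As a rough illustration only (the value below is a hand-built sketch and the name example_hdr is hypothetical, not lifted from the driver), an IPsec-ESP descriptor that encrypts with AES-CBC on the primary unit while MDEU-A computes an HMAC-SHA1 might be composed like this:

/*
 * Illustrative sketch only: OR the SEL/MODE/TYPE bits above into a single
 * big-endian header word.
 */
__be32 example_hdr = DESC_HDR_TYPE_IPSEC_ESP |		/* descriptor type */
		     DESC_HDR_SEL0_AESU |		/* primary EU: AES */
		     DESC_HDR_MODE0_AESU_CBC |		/* CBC mode */
		     DESC_HDR_MODE0_ENCRYPT |		/* outbound/encrypt */
		     DESC_HDR_SEL1_MDEUA |		/* secondary EU: MDEU-A */
		     DESC_HDR_MODE1_MDEU_INIT |		/* init digest state */
		     DESC_HDR_MODE1_MDEU_PAD |		/* pad final block */
		     DESC_HDR_MODE1_MDEU_SHA1_HMAC |	/* HMAC-SHA1 */
		     DESC_HDR_DONE_NOTIFY;		/* interrupt on completion */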
/*
* Hash: Hash algorithms under the crypto API
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
#include <linux/crypto.h>
struct crypto_ahash {
struct crypto_tfm base;
};
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_ahash *)tfm;
}
static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
mask &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_AHASH;
mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
}
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
crypto_free_tfm(crypto_ahash_tfm(tfm));
}
static inline unsigned int crypto_ahash_alignmask(
struct crypto_ahash *tfm)
{
return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
}
static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm)
{
return &crypto_ahash_tfm(tfm)->crt_ahash;
}
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
return crypto_ahash_crt(tfm)->digestsize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}
static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}
static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
static inline struct crypto_ahash *crypto_ahash_reqtfm(
struct ahash_request *req)
{
return __crypto_ahash_cast(req->base.tfm);
}
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
return crypto_ahash_crt(tfm)->reqsize;
}
static inline int crypto_ahash_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
struct ahash_tfm *crt = crypto_ahash_crt(tfm);
return crt->setkey(tfm, key, keylen);
}
static inline int crypto_ahash_digest(struct ahash_request *req)
{
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
return crt->digest(req);
}
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
req->base.tfm = crypto_ahash_tfm(tfm);
}
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
req = kmalloc(sizeof(struct ahash_request) +
crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
static inline void ahash_request_free(struct ahash_request *req)
{
kfree(req);
}
static inline struct ahash_request *ahash_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct ahash_request, base);
}
static inline void ahash_request_set_callback(struct ahash_request *req,
u32 flags,
crypto_completion_t complete,
void *data)
{
req->base.complete = complete;
req->base.data = data;
req->base.flags = flags;
}
static inline void ahash_request_set_crypt(struct ahash_request *req,
struct scatterlist *src, u8 *result,
unsigned int nbytes)
{
req->src = src;
req->nbytes = nbytes;
req->result = result;
}
#endif /* _CRYPTO_HASH_H */
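For orientation, a minimal caller-side sketch of the asynchronous hash interface declared above in crypto/hash.h. The algorithm name "sha1", the example_* identifiers and the abbreviated error handling are assumptions for illustration, not part of this patch:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>

/* Hypothetical completion callback; runs if the request completes asynchronously. */
static void example_hash_done(struct crypto_async_request *base, int err)
{
}

static int example_hash_sg(struct scatterlist *sg, unsigned int nbytes, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);	/* "sha1" chosen only as an example */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_hash_done, NULL);
	ahash_request_set_crypt(req, sg, digest, nbytes);

	err = crypto_ahash_digest(req);
	/*
	 * A real caller must wait for example_hash_done() before freeing
	 * anything when -EINPROGRESS/-EBUSY is returned; omitted here for
	 * brevity.
	 */
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}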
/*
* Hash algorithms.
*
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_INTERNAL_HASH_H
#define _CRYPTO_INTERNAL_HASH_H
#include <crypto/algapi.h>
#include <crypto/hash.h>
struct ahash_request;
struct scatterlist;
struct crypto_hash_walk {
char *data;
unsigned int offset;
unsigned int alignmask;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
unsigned int flags;
};
extern const struct crypto_type crypto_ahash_type;
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline struct ahash_alg *crypto_ahash_alg(
struct crypto_ahash *tfm)
{
return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash;
}
static inline int ahash_enqueue_request(struct crypto_queue *queue,
struct ahash_request *request)
{
return crypto_enqueue_request(queue, &request->base);
}
static inline struct ahash_request *ahash_dequeue_request(
struct crypto_queue *queue)
{
return ahash_request_cast(crypto_dequeue_request(queue));
}
static inline void *ahash_request_ctx(struct ahash_request *req)
{
return req->__ctx;
}
static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
struct crypto_ahash *tfm)
{
return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
}
#endif /* _CRYPTO_INTERNAL_HASH_H */
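And a sketch of how an ahash implementation's ->update() handler might consume its scatterlist input with the walking helper above, loosely modeled on the generic crc32c-style usage; example_absorb() and the other example_* names are hypothetical:

#include <crypto/internal/hash.h>

/* Hypothetical per-chunk mixing step operating on the request context. */
static void example_absorb(void *state, const char *data, unsigned int len)
{
}

static int example_ahash_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	/*
	 * crypto_hash_walk_first() maps the first chunk and returns its
	 * length; crypto_hash_walk_done() unmaps it and advances, returning
	 * 0 once req->nbytes have been consumed (or a negative error).
	 */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		example_absorb(ahash_request_ctx(req), walk.data, nbytes);

	return nbytes;
}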
......@@ -30,15 +30,17 @@
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
#define CRYPTO_ALG_TYPE_HASH 0x00000003
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008
#define CRYPTO_ALG_TYPE_AEAD 0x00000009
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
#define CRYPTO_ALG_TYPE_HASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_LARVAL 0x00000010
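A note on the renumbered type constants and the new mask: assuming the core's usual lookup rule, where an algorithm is accepted when ((cra_flags ^ type) & mask) == 0, CRYPTO_ALG_TYPE_AHASH_MASK (0xc) is loose enough that a request for type AHASH (0xa) also matches DIGEST (0x8) and HASH (0x9), since (0x8 ^ 0xa) & 0xc, (0x9 ^ 0xa) & 0xc and (0xa ^ 0xa) & 0xc are all zero. By contrast, CRYPTO_ALG_TYPE_HASH_MASK (0xe) only lets DIGEST and HASH satisfy each other, so a synchronous hash request is never handed an ahash.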
......@@ -102,6 +104,7 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_ahash;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
......@@ -131,6 +134,16 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
struct scatterlist *src;
u8 *result;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
......@@ -195,6 +208,17 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
};
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
......@@ -272,6 +296,7 @@ struct compress_alg {
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_hash cra_u.hash
#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
struct crypto_alg {
......@@ -298,6 +323,7 @@ struct crypto_alg {
struct cipher_alg cipher;
struct digest_alg digest;
struct hash_alg hash;
struct ahash_alg ahash;
struct compress_alg compress;
} cra_u;
......@@ -383,6 +409,18 @@ struct hash_tfm {
unsigned int digestsize;
};
struct ahash_tfm {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
unsigned int reqsize;
};
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
......@@ -397,6 +435,7 @@ struct compress_tfm {
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
struct crypto_tfm {
......@@ -409,6 +448,7 @@ struct crypto_tfm {
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
struct ahash_tfm ahash;
struct compress_tfm compress;
} crt_u;
......
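Finally, a minimal, hypothetical sketch (not part of this merge) of how a module might register an asynchronous hash through the new cra_ahash union member and crypto_ahash_type; all example_* names, the sizes and the trivial handlers are placeholders:

#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>

static int example_init(struct ahash_request *req)   { return 0; }
static int example_update(struct ahash_request *req) { return 0; }
static int example_final(struct ahash_request *req)  { return 0; }
static int example_digest(struct ahash_request *req) { return 0; }

static struct crypto_alg example_alg = {
	.cra_name	 = "example",
	.cra_driver_name = "example-generic",
	.cra_priority	 = 100,
	.cra_flags	 = CRYPTO_ALG_TYPE_AHASH,
	.cra_blocksize	 = 64,		/* placeholder block size */
	.cra_ctxsize	 = 0,
	.cra_module	 = THIS_MODULE,
	.cra_list	 = LIST_HEAD_INIT(example_alg.cra_list),
	.cra_type	 = &crypto_ahash_type,
	.cra_u		 = { .ahash = {
		.digestsize = 20,	/* placeholder digest size */
		.init	    = example_init,
		.update	    = example_update,
		.final	    = example_final,
		.digest	    = example_digest,
	} },
};
/* A real driver would typically also set tfm->crt_ahash.reqsize from a cra_init hook. */

static int __init example_mod_init(void)
{
	return crypto_register_alg(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_alg(&example_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");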