Commit 37d40084 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (31 commits)
  crypto: aes_generic - Fix checkpatch errors
  crypto: fcrypt - Fix checkpatch errors
  crypto: ecb - Fix checkpatch errors
  crypto: des_generic - Fix checkpatch errors
  crypto: deflate - Fix checkpatch errors
  crypto: crypto_null - Fix checkpatch errors
  crypto: cipher - Fix checkpatch errors
  crypto: crc32 - Fix checkpatch errors
  crypto: compress - Fix checkpatch errors
  crypto: cast6 - Fix checkpatch errors
  crypto: cast5 - Fix checkpatch errors
  crypto: camellia - Fix checkpatch errors
  crypto: authenc - Fix checkpatch errors
  crypto: api - Fix checkpatch errors
  crypto: anubis - Fix checkpatch errors
  crypto: algapi - Fix checkpatch errors
  crypto: blowfish - Fix checkpatch errors
  crypto: aead - Fix checkpatch errors
  crypto: ablkcipher - Fix checkpatch errors
  crypto: pcrypt - call the complete function on error
  ...
parents 68c6b859 8d0c123f
@@ -86,11 +86,19 @@ static struct amba_device cpu8815_amba_gpio[] = {
	},
};

static struct amba_device cpu8815_amba_rng = {
	.dev = {
		.init_name = "rng",
	},
	__MEM_4K_RESOURCE(NOMADIK_RNG_BASE),
};

static struct amba_device *amba_devs[] __initdata = {
	cpu8815_amba_gpio + 0,
	cpu8815_amba_gpio + 1,
	cpu8815_amba_gpio + 2,
	cpu8815_amba_gpio + 3,
	&cpu8815_amba_rng
};

static int __init cpu8815_init(void)
...
@@ -78,14 +78,14 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

-	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
...
@@ -114,6 +114,16 @@ config CRYPTO_NULL
	help
	  These are 'Null' algorithms, used by IPsec, which do nothing.

config CRYPTO_PCRYPT
	tristate "Parallel crypto engine (EXPERIMENTAL)"
	depends on SMP && EXPERIMENTAL
	select PADATA
	select CRYPTO_MANAGER
	select CRYPTO_AEAD
	help
	  This converts an arbitrary crypto algorithm into a parallel
	  algorithm that executes in kernel threads.

config CRYPTO_WORKQUEUE
	tristate
...
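For orientation, the pcrypt template enabled by the new CRYPTO_PCRYPT option is instantiated by name around an existing AEAD algorithm. The following is only an assumed usage sketch, not code from this merge; the inner algorithm string and the helper name are illustrative.

/* Hypothetical caller: wrap an AEAD transform in the pcrypt template. */
#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_aead *try_alloc_parallel_aead(void)
{
	struct crypto_aead *tfm;

	/* "pcrypt(...)" only resolves when CONFIG_CRYPTO_PCRYPT is built. */
	tfm = crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))", 0, 0);
	if (IS_ERR(tfm))
		return NULL;	/* fall back to the plain, non-parallel AEAD */

	return tfm;
}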
@@ -56,6 +56,7 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
...
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
@@ -8,7 +8,7 @@
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
...
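The ablkcipher header above describes completion being signalled through a callback. A minimal, assumed caller (not part of this diff; my_done, the algorithm name and the omitted setkey step are illustrative) would look roughly like this:

/* Hypothetical sketch of driving an async ablkcipher to completion. */
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static void my_done(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS)
		return;			/* backlog notification, keep waiting */
	complete(req->data);		/* req->data was set to a struct completion */
}

static int my_cbc_encrypt(struct scatterlist *src, struct scatterlist *dst,
			  unsigned int nbytes, u8 *iv)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int err;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* crypto_ablkcipher_setkey() omitted for brevity. */

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ablkcipher(tfm);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					my_done, &done);
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&done);
		err = 0;	/* a real caller would also check the callback status */
	}

	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);
	return err;
}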
/*
 * AEAD: Authenticated Encryption with Associated Data
 *
 * This file provides API support for AEAD algorithms.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
...
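Similarly, for the AEAD interface whose header is touched above, an assumed old-style encryption call fills in an aead_request as sketched below. The buffer layout is a placeholder, and the tfm is assumed to have been allocated with CRYPTO_ALG_ASYNC masked out so the call completes inline.

/* Hypothetical sketch of an old-style AEAD encryption request. */
static int my_aead_seal(struct crypto_aead *tfm, struct scatterlist *assoc,
			unsigned int assoclen, struct scatterlist *sg,
			unsigned int cryptlen, u8 *iv)
{
	struct aead_request *req;
	int err;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	aead_request_set_callback(req, 0, NULL, NULL);	/* synchronous tfm assumed */
	aead_request_set_assoc(req, assoc, assoclen);	/* authenticated only */
	/* dst == src: in-place; sg must have room for the authentication tag */
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
	return err;
}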
/*
 * Cryptographic API.
 *
 * AES Cipher Algorithm.
@@ -1127,7 +1127,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)

#define imix_col(y, x)	do {	\
	u = star_x(x);		\
	v = star_x(u);		\
	w = star_x(v);		\
...
@@ -230,7 +230,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
	list_add(&alg->cra_list, &crypto_alg_list);
	list_add(&larval->alg.cra_list, &crypto_alg_list);

out:
	return larval;

free_larval:
@@ -388,7 +388,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
{
	int ret;
	LIST_HEAD(list);

	down_write(&crypto_alg_sem);
	ret = crypto_remove_alg(alg, &list);
	up_write(&crypto_alg_sem);
...
@@ -469,14 +469,13 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
	u32 kappa[ANUBIS_MAX_N];
	u32 inter[ANUBIS_MAX_N];

	switch (key_len) {
	case 16: case 20: case 24: case 28:
	case 32: case 36: case 40:
		break;
	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	ctx->key_len = key_len * 8;
@@ -530,23 +529,24 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
		/*
		 * compute kappa^{r+1} from kappa^r:
		 */
		if (r == R)
			break;
		for (i = 0; i < N; i++) {
			int j = i;
			inter[i] = T0[(kappa[j--] >> 24) ];
			if (j < 0)
				j = N - 1;
			inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
			if (j < 0)
				j = N - 1;
			inter[i] ^= T2[(kappa[j--] >> 8) & 0xff];
			if (j < 0)
				j = N - 1;
			inter[i] ^= T3[(kappa[j ] ) & 0xff];
		}
		kappa[0] = inter[0] ^ rc[r];
		for (i = 1; i < N; i++)
			kappa[i] = inter[i];
	}

	/*
@@ -690,7 +690,7 @@ static struct crypto_alg anubis_alg = {
static int __init anubis_mod_init(void)
{
	int ret = 0;

	ret = crypto_register_alg(&anubis_alg);
	return ret;
}
...
@@ -10,7 +10,7 @@
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
@@ -288,11 +288,11 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
	case CRYPTO_ALG_TYPE_COMPRESS:
		return crypto_init_compress_ops(tfm);

	default:
		break;
	}

	BUG();
	return -EINVAL;
}
@@ -315,10 +315,9 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
	case CRYPTO_ALG_TYPE_COMPRESS:
		crypto_exit_compress_ops(tfm);
		break;

	default:
		BUG();
	}
}
@@ -593,12 +592,12 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
...
@@ -194,7 +194,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;
@@ -231,7 +231,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);

	err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
	if (err)
		goto out;
@@ -464,7 +464,7 @@ static int crypto_authenc_verify(struct aead_request *req,
	ihash = ohash + authsize;
	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
				 authsize, 0);
	return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
}

static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
@@ -557,11 +557,11 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
	ctx->auth = auth;
	ctx->enc = enc;

	tfm->crt_aead.reqsize = max_t(unsigned int,
				crypto_ahash_reqsize(auth) + ctx->reqoff +
				sizeof(struct authenc_request_ctx) +
				sizeof(struct ahash_request),
				sizeof(struct skcipher_givcrypt_request) +
				crypto_ablkcipher_reqsize(enc) +
				crypto_ablkcipher_ivsize(enc));
...
/*
 * Cryptographic API.
 *
 * Blowfish Cipher Algorithm, by Bruce Schneier.
@@ -299,7 +299,7 @@ static const u32 bf_sbox[256 * 4] = {
	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
};

/*
 * Round loop unrolling macros, S is a pointer to a S-Box array
 * organized in 4 unsigned longs at a row.
 */
@@ -315,7 +315,7 @@ static const u32 bf_sbox[256 * 4] = {
/*
 * The blowfish encipher, processes 64-bit blocks.
 * NOTE: This function MUSTN'T respect endianess
 */
static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
{
@@ -395,7 +395,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	out_blk[1] = cpu_to_be32(yl);
}

/*
 * Calculates the blowfish S and P boxes for encryption and decryption.
 */
static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -417,10 +417,10 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)

	/* Actual subkey generation */
	for (j = 0, i = 0; i < 16 + 2; i++) {
		temp = (((u32)key[j] << 24) |
			((u32)key[(j + 1) % keylen] << 16) |
			((u32)key[(j + 2) % keylen] << 8) |
			((u32)key[(j + 3) % keylen]));

		P[i] = P[i] ^ temp;
		j = (j + 4) % keylen;
@@ -444,7 +444,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
			S[count + 1] = data[1];
		}
	}

	/* Bruce says not to bother with the weak key check. */
	return 0;
}
...
...@@ -39,271 +39,271 @@ ...@@ -39,271 +39,271 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
static const u32 camellia_sp1110[256] = { static const u32 camellia_sp1110[256] = {
0x70707000,0x82828200,0x2c2c2c00,0xececec00, 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00,
0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500, 0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500,
0xe4e4e400,0x85858500,0x57575700,0x35353500, 0xe4e4e400, 0x85858500, 0x57575700, 0x35353500,
0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100, 0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100,
0x23232300,0xefefef00,0x6b6b6b00,0x93939300, 0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300,
0x45454500,0x19191900,0xa5a5a500,0x21212100, 0x45454500, 0x19191900, 0xa5a5a500, 0x21212100,
0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00, 0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00,
0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00, 0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00,
0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00, 0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00,
0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00, 0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00,
0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00, 0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00,
0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00, 0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00,
0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00, 0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00,
0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00, 0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00,
0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600, 0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600,
0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00, 0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00,
0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600, 0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600,
0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00, 0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00,
0x74747400,0x12121200,0x2b2b2b00,0x20202000, 0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000,
0xf0f0f000,0xb1b1b100,0x84848400,0x99999900, 0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900,
0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200, 0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200,
0x34343400,0x7e7e7e00,0x76767600,0x05050500, 0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500,
0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100, 0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100,
0xd1d1d100,0x17171700,0x04040400,0xd7d7d700, 0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700,
0x14141400,0x58585800,0x3a3a3a00,0x61616100, 0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100,
0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00, 0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00,
0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600, 0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600,
0x53535300,0x18181800,0xf2f2f200,0x22222200, 0x53535300, 0x18181800, 0xf2f2f200, 0x22222200,
0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200, 0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200,
0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100, 0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100,
0x24242400,0x08080800,0xe8e8e800,0xa8a8a800, 0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800,
0x60606000,0xfcfcfc00,0x69696900,0x50505000, 0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000,
0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00, 0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00,
0xa1a1a100,0x89898900,0x62626200,0x97979700, 0xa1a1a100, 0x89898900, 0x62626200, 0x97979700,
0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500, 0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500,
0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200, 0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200,
0x10101000,0xc4c4c400,0x00000000,0x48484800, 0x10101000, 0xc4c4c400, 0x00000000, 0x48484800,
0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00, 0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00,
0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00, 0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00,
0x09090900,0x3f3f3f00,0xdddddd00,0x94949400, 0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400,
0x87878700,0x5c5c5c00,0x83838300,0x02020200, 0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200,
0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300, 0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300,
0x73737300,0x67676700,0xf6f6f600,0xf3f3f300, 0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300,
0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200, 0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200,
0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600, 0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600,
0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00, 0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00,
0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00, 0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00,
0x13131300,0xbebebe00,0x63636300,0x2e2e2e00, 0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00,
0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00, 0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00,
0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00, 0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00,
0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600, 0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600,
0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900, 0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900,
0x78787800,0x98989800,0x06060600,0x6a6a6a00, 0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00,
0xe7e7e700,0x46464600,0x71717100,0xbababa00, 0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00,
0xd4d4d400,0x25252500,0xababab00,0x42424200, 0xd4d4d400, 0x25252500, 0xababab00, 0x42424200,
0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00, 0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00,
0x72727200,0x07070700,0xb9b9b900,0x55555500, 0x72727200, 0x07070700, 0xb9b9b900, 0x55555500,
0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00, 0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00,
0x36363600,0x49494900,0x2a2a2a00,0x68686800, 0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800,
0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400, 0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400,
0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00, 0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00,
0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100, 0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100,
0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400, 0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400,
0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00, 0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00,
}; };
static const u32 camellia_sp0222[256] = { static const u32 camellia_sp0222[256] = {
0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9, 0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9,
0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb, 0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb,
0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a, 0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a,
0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282, 0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282,
0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727, 0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727,
0x008a8a8a,0x00323232,0x004b4b4b,0x00424242, 0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242,
0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c, 0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c,
0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b, 0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b,
0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f, 0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f,
0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d, 0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d,
0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe, 0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe,
0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434, 0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434,
0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595, 0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595,
0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a, 0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a,
0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad, 0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad,
0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a, 0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a,
0x00171717,0x001a1a1a,0x00353535,0x00cccccc, 0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc,
0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a, 0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a,
0x00e8e8e8,0x00242424,0x00565656,0x00404040, 0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040,
0x00e1e1e1,0x00636363,0x00090909,0x00333333, 0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333,
0x00bfbfbf,0x00989898,0x00979797,0x00858585, 0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585,
0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a, 0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a,
0x00dadada,0x006f6f6f,0x00535353,0x00626262, 0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262,
0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf, 0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf,
0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2, 0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2,
0x00bdbdbd,0x00363636,0x00222222,0x00383838, 0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838,
0x00646464,0x001e1e1e,0x00393939,0x002c2c2c, 0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c,
0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444, 0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444,
0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565, 0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565,
0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323, 0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323,
0x00484848,0x00101010,0x00d1d1d1,0x00515151, 0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151,
0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0, 0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0,
0x00555555,0x00a1a1a1,0x00414141,0x00fafafa, 0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa,
0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f, 0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f,
0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b, 0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b,
0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5, 0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5,
0x00202020,0x00898989,0x00000000,0x00909090, 0x00202020, 0x00898989, 0x00000000, 0x00909090,
0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7, 0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7,
0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5, 0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5,
0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929, 0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929,
0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404, 0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404,
0x009b9b9b,0x00949494,0x00212121,0x00666666, 0x009b9b9b, 0x00949494, 0x00212121, 0x00666666,
0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7, 0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7,
0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5, 0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5,
0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c, 0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c,
0x00919191,0x006e6e6e,0x008d8d8d,0x00767676, 0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676,
0x00030303,0x002d2d2d,0x00dedede,0x00969696, 0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696,
0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c, 0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c,
0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919, 0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919,
0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d, 0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d,
0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d, 0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d,
0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2, 0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2,
0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4, 0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4,
0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575, 0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575,
0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484, 0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484,
0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5, 0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5,
0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa, 0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa,
0x00f1f1f1,0x00dddddd,0x00595959,0x00141414, 0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414,
0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0, 0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0,
0x00787878,0x00707070,0x00e3e3e3,0x00494949, 0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949,
0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6, 0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6,
0x00777777,0x00939393,0x00868686,0x00838383, 0x00777777, 0x00939393, 0x00868686, 0x00838383,
0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9, 0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9,
0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d, 0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d,
}; };
static const u32 camellia_sp3033[256] = { static const u32 camellia_sp3033[256] = {
0x38003838,0x41004141,0x16001616,0x76007676, 0x38003838, 0x41004141, 0x16001616, 0x76007676,
0xd900d9d9,0x93009393,0x60006060,0xf200f2f2, 0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2,
0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a, 0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a,
0x75007575,0x06000606,0x57005757,0xa000a0a0, 0x75007575, 0x06000606, 0x57005757, 0xa000a0a0,
0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9, 0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9,
0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090, 0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090,
0xf600f6f6,0x07000707,0xa700a7a7,0x27002727, 0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727,
0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede, 0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede,
0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7, 0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7,
0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767, 0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767,
0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf, 0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf,
0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d, 0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d,
0x53005353,0xf000f0f0,0x9c009c9c,0x65006565, 0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565,
0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e, 0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e,
0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b, 0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b,
0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6, 0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6,
0xc500c5c5,0x86008686,0x4d004d4d,0x33003333, 0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333,
0xfd00fdfd,0x66006666,0x58005858,0x96009696, 0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696,
0x3a003a3a,0x09000909,0x95009595,0x10001010, 0x3a003a3a, 0x09000909, 0x95009595, 0x10001010,
0x78007878,0xd800d8d8,0x42004242,0xcc00cccc, 0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc,
0xef00efef,0x26002626,0xe500e5e5,0x61006161, 0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161,
0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282, 0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282,
0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898, 0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898,
0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb, 0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb,
0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0, 0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0,
0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e, 0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e,
0x19001919,0x87008787,0x4e004e4e,0x0b000b0b, 0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b,
0xa900a9a9,0x0c000c0c,0x79007979,0x11001111, 0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111,
0x7f007f7f,0x22002222,0xe700e7e7,0x59005959, 0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959,
0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8, 0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8,
0x12001212,0x04000404,0x74007474,0x54005454, 0x12001212, 0x04000404, 0x74007474, 0x54005454,
0x30003030,0x7e007e7e,0xb400b4b4,0x28002828, 0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828,
0x55005555,0x68006868,0x50005050,0xbe00bebe, 0x55005555, 0x68006868, 0x50005050, 0xbe00bebe,
0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb, 0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb,
0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca, 0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca,
0x70007070,0xff00ffff,0x32003232,0x69006969, 0x70007070, 0xff00ffff, 0x32003232, 0x69006969,
0x08000808,0x62006262,0x00000000,0x24002424, 0x08000808, 0x62006262, 0x00000000, 0x24002424,
0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded, 0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded,
0x45004545,0x81008181,0x73007373,0x6d006d6d, 0x45004545, 0x81008181, 0x73007373, 0x6d006d6d,
0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a, 0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a,
0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101, 0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101,
0xe600e6e6,0x25002525,0x48004848,0x99009999, 0xe600e6e6, 0x25002525, 0x48004848, 0x99009999,
0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9, 0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9,
0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171, 0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171,
0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313, 0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313,
0x64006464,0x9b009b9b,0x63006363,0x9d009d9d, 0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d,
0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5, 0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5,
0x89008989,0x5f005f5f,0xb100b1b1,0x17001717, 0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717,
0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646, 0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646,
0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747, 0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747,
0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b, 0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b,
0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac, 0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac,
0x3c003c3c,0x4c004c4c,0x03000303,0x35003535, 0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535,
0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d, 0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d,
0x6a006a6a,0x92009292,0xd500d5d5,0x21002121, 0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121,
0x44004444,0x51005151,0xc600c6c6,0x7d007d7d, 0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d,
0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa, 0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa,
0x7c007c7c,0x77007777,0x56005656,0x05000505, 0x7c007c7c, 0x77007777, 0x56005656, 0x05000505,
0x1b001b1b,0xa400a4a4,0x15001515,0x34003434, 0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434,
0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252, 0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252,
0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd, 0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd,
0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0, 0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0,
0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a, 0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a,
0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f, 0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f,
}; };
static const u32 camellia_sp4404[256] = { static const u32 camellia_sp4404[256] = {
0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0, 0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0,
0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae, 0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae,
0x23230023,0x6b6b006b,0x45450045,0xa5a500a5, 0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5,
0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092, 0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092,
0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f, 0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f,
0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b, 0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b,
0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d, 0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d,
0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c, 0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c,
0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0, 0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0,
0x74740074,0x2b2b002b,0xf0f000f0,0x84840084, 0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084,
0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076, 0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076,
0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004, 0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004,
0x14140014,0x3a3a003a,0xdede00de,0x11110011, 0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011,
0x32320032,0x9c9c009c,0x53530053,0xf2f200f2, 0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2,
0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a, 0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a,
0x24240024,0xe8e800e8,0x60600060,0x69690069, 0x24240024, 0xe8e800e8, 0x60600060, 0x69690069,
0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062, 0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062,
0x54540054,0x1e1e001e,0xe0e000e0,0x64640064, 0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064,
0x10100010,0x00000000,0xa3a300a3,0x75750075, 0x10100010, 0x00000000, 0xa3a300a3, 0x75750075,
0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd, 0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd,
0x87870087,0x83830083,0xcdcd00cd,0x90900090, 0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090,
0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf, 0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf,
0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6, 0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6,
0x81810081,0x6f6f006f,0x13130013,0x63630063, 0x81810081, 0x6f6f006f, 0x13130013, 0x63630063,
0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc, 0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc,
0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4, 0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4,
0x78780078,0x06060006,0xe7e700e7,0x71710071, 0x78780078, 0x06060006, 0xe7e700e7, 0x71710071,
0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d, 0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d,
0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac, 0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac,
0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1, 0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1,
0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043, 0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043,
0x15150015,0xadad00ad,0x77770077,0x80800080, 0x15150015, 0xadad00ad, 0x77770077, 0x80800080,
0x82820082,0xecec00ec,0x27270027,0xe5e500e5, 0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5,
0x85850085,0x35350035,0x0c0c000c,0x41410041, 0x85850085, 0x35350035, 0x0c0c000c, 0x41410041,
0xefef00ef,0x93930093,0x19190019,0x21210021, 0xefef00ef, 0x93930093, 0x19190019, 0x21210021,
0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd, 0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd,
0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce, 0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce,
0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a, 0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a,
0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d, 0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d,
0x01010001,0xd6d600d6,0x56560056,0x4d4d004d, 0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d,
0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d, 0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d,
0x12120012,0x20200020,0xb1b100b1,0x99990099, 0x12120012, 0x20200020, 0xb1b100b1, 0x99990099,
0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005, 0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005,
0xb7b700b7,0x31310031,0x17170017,0xd7d700d7, 0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7,
0x58580058,0x61610061,0x1b1b001b,0x1c1c001c, 0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c,
0x0f0f000f,0x16160016,0x18180018,0x22220022, 0x0f0f000f, 0x16160016, 0x18180018, 0x22220022,
0x44440044,0xb2b200b2,0xb5b500b5,0x91910091, 0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091,
0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050, 0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050,
0xd0d000d0,0x7d7d007d,0x89890089,0x97970097, 0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097,
0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2, 0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2,
0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db, 0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db,
0x03030003,0xdada00da,0x3f3f003f,0x94940094, 0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094,
0x5c5c005c,0x02020002,0x4a4a004a,0x33330033, 0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033,
0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2, 0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2,
0x9b9b009b,0x26260026,0x37370037,0x3b3b003b, 0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b,
0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e, 0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e,
0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e, 0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e,
0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059, 0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059,
0x98980098,0x6a6a006a,0x46460046,0xbaba00ba, 0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba,
0x25250025,0x42420042,0xa2a200a2,0xfafa00fa, 0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa,
0x07070007,0x55550055,0xeeee00ee,0x0a0a000a, 0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a,
0x49490049,0x68680068,0x38380038,0xa4a400a4, 0x49490049, 0x68680068, 0x38380038, 0xa4a400a4,
0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1, 0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1,
0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e, 0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e,
}; };
@@ -344,7 +344,7 @@ static const u32 camellia_sp4404[256] = {
	lr = (lr << bits) + (rl >> (32 - bits));	\
	rl = (rl << bits) + (rr >> (32 - bits));	\
	rr = (rr << bits) + (w0 >> (32 - bits));	\
} while (0)

#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits)	\
do {						\
@@ -354,7 +354,7 @@ static const u32 camellia_sp4404[256] = {
	lr = (rl << (bits - 32)) + (rr >> (64 - bits));	\
	rl = (rr << (bits - 32)) + (w0 >> (64 - bits));	\
	rr = (w0 << (bits - 32)) + (w1 >> (64 - bits));	\
} while (0)

#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1)	\
do {								\
@@ -373,7 +373,7 @@ static const u32 camellia_sp4404[256] = {
	yl ^= yr;		\
	yr = ror32(yr, 8);	\
	yr ^= yl;		\
} while (0)

#define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
@@ -835,7 +835,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
static void camellia_setup192(const unsigned char *key, u32 *subkey)
{
	unsigned char kk[32];
	u32 krll, krlr, krrl, krrr;

	memcpy(kk, key, 24);
	memcpy((unsigned char *)&krll, key+16, 4);
@@ -865,7 +865,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
	t1 |= lr;		\
	ll ^= t1;		\
	rr ^= rol32(t3, 1);	\
} while (0)

#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir)	\
do {								\
@@ -881,12 +881,12 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
	ir ^= il ^ kr;			\
	yl ^= ir;			\
	yr ^= ror32(il, 8) ^ ir;	\
} while (0)

/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
{
	u32 il, ir, t0, t1;	/* temporary variables */

	/* pre whitening but absorb kw2 */
	io[0] ^= SUBKEY_L(0);
@@ -894,30 +894,30 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
	/* main iteration */
#define ROUNDS(i) do { \
	CAMELLIA_ROUNDSM(io[0], io[1], \
			 SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
			 io[2], io[3], il, ir); \
	CAMELLIA_ROUNDSM(io[2], io[3], \
			 SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
			 io[0], io[1], il, ir); \
	CAMELLIA_ROUNDSM(io[0], io[1], \
			 SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
			 io[2], io[3], il, ir); \
	CAMELLIA_ROUNDSM(io[2], io[3], \
			 SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
			 io[0], io[1], il, ir); \
	CAMELLIA_ROUNDSM(io[0], io[1], \
			 SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
			 io[2], io[3], il, ir); \
	CAMELLIA_ROUNDSM(io[2], io[3], \
			 SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
			 io[0], io[1], il, ir); \
} while (0)
#define FLS(i) do { \
	CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
		     SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
		     SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
		     t0, t1, il, ir); \
} while (0)

	ROUNDS(0);
@@ -941,7 +941,7 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
{
	u32 il, ir, t0, t1;	/* temporary variables */

	/* pre whitening but absorb kw2 */
	io[0] ^= SUBKEY_L(i);
@@ -949,30 +949,30 @@ static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
	/* main iteration */
#define ROUNDS(i) do { \
	CAMELLIA_ROUNDSM(io[0], io[1], \
			 SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
			 io[2], io[3], il, ir); \
	CAMELLIA_ROUNDSM(io[2], io[3], \
			 SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
			 io[0], io[1], il, ir); \
	CAMELLIA_ROUNDSM(io[0], io[1], \
			 SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
			 io[2], io[3], il, ir); \
	CAMELLIA_ROUNDSM(io[2], io[3], \
			 SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
			 io[0], io[1], il, ir); \
	CAMELLIA_ROUNDSM(io[0], io[1], \
			 SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
			 io[2], io[3], il, ir); \
	CAMELLIA_ROUNDSM(io[2], io[3], \
			 SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
			 io[0], io[1], il, ir); \
} while (0)
#define FLS(i) do { \
	CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
		     SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
		     SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
		     t0, t1, il, ir); \
} while (0)

	if (i == 32) {
...
@@ -569,12 +569,12 @@ static const u32 sb8[256] = {
	0xeaee6801, 0x8db2a283, 0xea8bf59e
};

#define F1(D, m, r)  ((I = ((m) + (D))), (I = rol32(I, (r))),	\
	(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, m, r)  ((I = ((m) ^ (D))), (I = rol32(I, (r))),	\
	(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, m, r)  ((I = ((m) - (D))), (I = rol32(I, (r))),	\
	(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))

static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -694,7 +694,7 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
	dst[1] = cpu_to_be32(l);
}

static void key_schedule(u32 *x, u32 *z, u32 *k)
{
#define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff)
...
@@ -11,7 +11,7 @@
 * under the terms of GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
@@ -35,12 +35,12 @@ struct cast6_ctx {
	u8 Kr[12][4];
};

#define F1(D, r, m)  ((I = ((m) + (D))), (I = rol32(I, (r))),	\
	(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, r, m)  ((I = ((m) ^ (D))), (I = rol32(I, (r))),	\
	(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, r, m)  ((I = ((m) - (D))), (I = rol32(I, (r))),	\
	(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))

static const u32 s1[256] = {
	0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
@@ -312,7 +312,7 @@ static const u32 s4[256] = {
static const u32 Tm[24][8] = {
	{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
		0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
	{ 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
		0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
	{ 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
@@ -369,7 +369,8 @@ static const u8 Tr[4][8] = {
};

/* forward octave */
static void W(u32 *key, unsigned int i)
{
	u32 I;
	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
@@ -377,7 +378,7 @@ static void W(u32 *key, unsigned int i) {
	key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
	key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
	key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
	key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
	key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
@@ -393,11 +394,11 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
	if (key_len % 4 != 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memset(p_key, 0, 32);
	memcpy(p_key, in_key, key_len);

	key[0] = be32_to_cpu(p_key[0]);		/* A */
	key[1] = be32_to_cpu(p_key[1]);		/* B */
	key[2] = be32_to_cpu(p_key[2]);		/* C */
@@ -406,18 +407,16 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
	key[5] = be32_to_cpu(p_key[5]);		/* F */
	key[6] = be32_to_cpu(p_key[6]);		/* G */
	key[7] = be32_to_cpu(p_key[7]);		/* H */

	for (i = 0; i < 12; i++) {
		W(key, 2 * i);
		W(key, 2 * i + 1);

		c->Kr[i][0] = key[0] & 0x1f;
		c->Kr[i][1] = key[2] & 0x1f;
		c->Kr[i][2] = key[4] & 0x1f;
		c->Kr[i][3] = key[6] & 0x1f;

		c->Km[i][0] = key[7];
		c->Km[i][1] = key[5];
		c->Km[i][2] = key[3];
@@ -428,21 +427,23 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}

/*forward quad round*/
static void Q(u32 *block, u8 *Kr, u32 *Km)
{
	u32 I;
	block[2] ^= F1(block[3], Kr[0], Km[0]);
	block[1] ^= F2(block[2], Kr[1], Km[1]);
	block[0] ^= F3(block[1], Kr[2], Km[2]);
	block[3] ^= F1(block[0], Kr[3], Km[3]);
}

/*reverse quad round*/
static void QBAR(u32 *block, u8 *Kr, u32 *Km)
{
	u32 I;
	block[3] ^= F1(block[0], Kr[3], Km[3]);
	block[0] ^= F3(block[1], Kr[2], Km[2]);
	block[1] ^= F2(block[2], Kr[1], Km[1]);
	block[2] ^= F1(block[3], Kr[0], Km[0]);
}

static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -451,64 +452,65 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
	const __be32 *src = (const __be32 *)inbuf;
	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	u32 *Km;
	u8 *Kr;

	block[0] = be32_to_cpu(src[0]);
	block[1] = be32_to_cpu(src[1]);
	block[2] = be32_to_cpu(src[2]);
	block[3] = be32_to_cpu(src[3]);

	Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
	Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
	Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km);
	Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km);
	Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km);
	Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km);
	Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km);
	Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km);
	Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km);
	Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km);
	Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
	Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);

	dst[0] = cpu_to_be32(block[0]);
	dst[1] = cpu_to_be32(block[1]);
	dst[2] = cpu_to_be32(block[2]);
	dst[3] = cpu_to_be32(block[3]);
}

static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
	struct cast6_ctx *c = crypto_tfm_ctx(tfm);
	const __be32 *src = (const __be32 *)inbuf;
	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	u32 *Km;
	u8 *Kr;

	block[0] = be32_to_cpu(src[0]);
	block[1] = be32_to_cpu(src[1]);
	block[2] = be32_to_cpu(src[2]);
	block[3] = be32_to_cpu(src[3]);

	Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
	Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
	Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km);
	Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km);
	Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km);
	Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km);
	Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km);
	Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km);
	Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km);
	Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km);
	Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
	Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);

	dst[0] = cpu_to_be32(block[0]);
	dst[1] = cpu_to_be32(block[1]);
	dst[2] = cpu_to_be32(block[2]);
	dst[3] = cpu_to_be32(block[3]);
}

static struct crypto_alg alg = {
	.cra_name = "cast6",
...
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free * under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option) * Software Foundation; either version 2 of the License, or (at your option)
* any later version. * any later version.
* *
*/ */
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free * under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option) * Software Foundation; either version 2 of the License, or (at your option)
* any later version. * any later version.
* *
*/ */
...@@ -39,7 +39,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm) ...@@ -39,7 +39,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
ops->cot_compress = crypto_compress; ops->cot_compress = crypto_compress;
ops->cot_decompress = crypto_decompress; ops->cot_decompress = crypto_decompress;
return 0; return 0;
} }
......
/* /*
* Cryptographic API. * Cryptographic API.
* *
* CRC32C chksum * CRC32C chksum
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free * under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option) * Software Foundation; either version 2 of the License, or (at your option)
* any later version. * any later version.
* *
*/ */
...@@ -142,7 +142,7 @@ static u32 crc32c(u32 crc, const u8 *data, unsigned int length) ...@@ -142,7 +142,7 @@ static u32 crc32c(u32 crc, const u8 *data, unsigned int length)
} }
/* /*
 * Steps through buffer one byte at a time, calculates reflected		 * crc using table.
* crc using table. * crc using table.
*/ */
......
/* /*
* Cryptographic API. * Cryptographic API.
* *
* Null algorithms, aka Much Ado About Nothing. * Null algorithms, aka Much Ado About Nothing.
* *
* These are needed for IPsec, and may be useful in general for * These are needed for IPsec, and may be useful in general for
* testing & debugging. * testing & debugging.
* *
* The null cipher is compliant with RFC2410. * The null cipher is compliant with RFC2410.
* *
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
...@@ -163,7 +163,7 @@ MODULE_ALIAS("cipher_null"); ...@@ -163,7 +163,7 @@ MODULE_ALIAS("cipher_null");
static int __init crypto_null_mod_init(void) static int __init crypto_null_mod_init(void)
{ {
int ret = 0; int ret = 0;
ret = crypto_register_alg(&cipher_null); ret = crypto_register_alg(&cipher_null);
if (ret < 0) if (ret < 0)
goto out; goto out;
...@@ -180,7 +180,7 @@ static int __init crypto_null_mod_init(void) ...@@ -180,7 +180,7 @@ static int __init crypto_null_mod_init(void)
if (ret < 0) if (ret < 0)
goto out_unregister_digest; goto out_unregister_digest;
out: out:
return ret; return ret;
out_unregister_digest: out_unregister_digest:
......
/* /*
* Cryptographic API. * Cryptographic API.
* *
* Deflate algorithm (RFC 1951), implemented here primarily for use * Deflate algorithm (RFC 1951), implemented here primarily for use
* by IPCOMP (RFC 3173 & RFC 2394). * by IPCOMP (RFC 3173 & RFC 2394).
* *
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au> * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free * under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option) * Software Foundation; either version 2 of the License, or (at your option)
* any later version. * any later version.
* *
* FIXME: deflate transforms will require up to a total of about 436k of kernel * FIXME: deflate transforms will require up to a total of about 436k of kernel
...@@ -49,7 +49,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx) ...@@ -49,7 +49,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
struct z_stream_s *stream = &ctx->comp_stream; struct z_stream_s *stream = &ctx->comp_stream;
stream->workspace = vmalloc(zlib_deflate_workspacesize()); stream->workspace = vmalloc(zlib_deflate_workspacesize());
if (!stream->workspace ) { if (!stream->workspace) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -61,7 +61,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx) ...@@ -61,7 +61,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
ret = -EINVAL; ret = -EINVAL;
goto out_free; goto out_free;
} }
out: out:
return ret; return ret;
out_free: out_free:
vfree(stream->workspace); vfree(stream->workspace);
...@@ -74,7 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx) ...@@ -74,7 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
struct z_stream_s *stream = &ctx->decomp_stream; struct z_stream_s *stream = &ctx->decomp_stream;
stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL); stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (!stream->workspace ) { if (!stream->workspace) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -106,7 +106,7 @@ static int deflate_init(struct crypto_tfm *tfm) ...@@ -106,7 +106,7 @@ static int deflate_init(struct crypto_tfm *tfm)
{ {
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
int ret; int ret;
ret = deflate_comp_init(ctx); ret = deflate_comp_init(ctx);
if (ret) if (ret)
goto out; goto out;
...@@ -153,11 +153,11 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, ...@@ -153,11 +153,11 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
out: out:
return ret; return ret;
} }
static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen) unsigned int slen, u8 *dst, unsigned int *dlen)
{ {
int ret = 0; int ret = 0;
struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
struct z_stream_s *stream = &dctx->decomp_stream; struct z_stream_s *stream = &dctx->decomp_stream;
...@@ -182,7 +182,7 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, ...@@ -182,7 +182,7 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
if (ret == Z_OK && !stream->avail_in && stream->avail_out) { if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
u8 zerostuff = 0; u8 zerostuff = 0;
stream->next_in = &zerostuff; stream->next_in = &zerostuff;
stream->avail_in = 1; stream->avail_in = 1;
ret = zlib_inflate(stream, Z_FINISH); ret = zlib_inflate(stream, Z_FINISH);
} }
if (ret != Z_STREAM_END) { if (ret != Z_STREAM_END) {
......
...@@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, ...@@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) && !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
{
*flags |= CRYPTO_TFM_RES_WEAK_KEY; *flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL; return -EINVAL;
} }
......
...@@ -55,7 +55,7 @@ static int crypto_ecb_crypt(struct blkcipher_desc *desc, ...@@ -55,7 +55,7 @@ static int crypto_ecb_crypt(struct blkcipher_desc *desc,
do { do {
fn(crypto_cipher_tfm(tfm), wdst, wsrc); fn(crypto_cipher_tfm(tfm), wdst, wsrc);
wsrc += bsize; wsrc += bsize;
wdst += bsize; wdst += bsize;
} while ((nbytes -= bsize) >= bsize); } while ((nbytes -= bsize) >= bsize);
......
...@@ -60,13 +60,13 @@ do { \ ...@@ -60,13 +60,13 @@ do { \
u32 t = lo & ((1 << n) - 1); \ u32 t = lo & ((1 << n) - 1); \
lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \ lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \
hi = (hi >> n) | (t << (24-n)); \ hi = (hi >> n) | (t << (24-n)); \
} while(0) } while (0)
/* Rotate right one 64 bit number as a 56 bit number */ /* Rotate right one 64 bit number as a 56 bit number */
#define ror56_64(k, n) \ #define ror56_64(k, n) \
do { \ do { \
k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
} while(0) } while (0)
/* /*
* Sboxes for Feistel network derived from * Sboxes for Feistel network derived from
...@@ -228,7 +228,7 @@ do { \ ...@@ -228,7 +228,7 @@ do { \
union lc4 { __be32 l; u8 c[4]; } u; \ union lc4 { __be32 l; u8 c[4]; } u; \
u.l = sched ^ R; \ u.l = sched ^ R; \
L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \ L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
} while(0) } while (0)
/* /*
* encryptor * encryptor
......
...@@ -37,6 +37,19 @@ struct crypto_rfc4106_ctx { ...@@ -37,6 +37,19 @@ struct crypto_rfc4106_ctx {
u8 nonce[4]; u8 nonce[4];
}; };
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
u8 nonce[4];
};
struct crypto_rfc4543_req_ctx {
u8 auth_tag[16];
struct scatterlist cipher[1];
struct scatterlist payload[2];
struct scatterlist assoc[2];
struct aead_request subreq;
};
struct crypto_gcm_ghash_ctx { struct crypto_gcm_ghash_ctx {
unsigned int cryptlen; unsigned int cryptlen;
struct scatterlist *src; struct scatterlist *src;
...@@ -1047,6 +1060,272 @@ static struct crypto_template crypto_rfc4106_tmpl = { ...@@ -1047,6 +1060,272 @@ static struct crypto_template crypto_rfc4106_tmpl = {
.module = THIS_MODULE, .module = THIS_MODULE,
}; };
static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
struct aead_request *req)
{
unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
int err;
if (keylen < 4)
return -EINVAL;
keylen -= 4;
memcpy(ctx->nonce, key + keylen, 4);
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(child, key, keylen);
crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
if (authsize != 16)
return -EINVAL;
return crypto_aead_setauthsize(ctx->child, authsize);
}
/* this is the same as crypto_authenc_chain */
static void crypto_rfc4543_chain(struct scatterlist *head,
struct scatterlist *sg, int chain)
{
if (chain) {
head->length += sg->length;
sg = scatterwalk_sg_next(sg);
}
if (sg)
scatterwalk_sg_chain(head, 2, sg);
else
sg_mark_end(head);
}
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
int enc)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
struct aead_request *subreq = &rctx->subreq;
struct scatterlist *dst = req->dst;
struct scatterlist *cipher = rctx->cipher;
struct scatterlist *payload = rctx->payload;
struct scatterlist *assoc = rctx->assoc;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int assoclen = req->assoclen;
struct page *dstp;
u8 *vdst;
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
memcpy(iv, ctx->nonce, 4);
memcpy(iv + 4, req->iv, 8);
/* construct cipher/plaintext */
if (enc)
memset(rctx->auth_tag, 0, authsize);
else
scatterwalk_map_and_copy(rctx->auth_tag, dst,
req->cryptlen - authsize,
authsize, 0);
sg_init_one(cipher, rctx->auth_tag, authsize);
/* construct the aad */
dstp = sg_page(dst);
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
sg_init_table(payload, 2);
sg_set_buf(payload, req->iv, 8);
crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
sg_init_table(assoc, 2);
sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
req->assoc->offset);
crypto_rfc4543_chain(assoc, payload, 0);
aead_request_set_tfm(subreq, ctx->child);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
aead_request_set_assoc(subreq, assoc, assoclen);
return subreq;
}
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
struct aead_request *subreq;
int err;
subreq = crypto_rfc4543_crypt(req, 1);
err = crypto_aead_encrypt(subreq);
if (err)
return err;
scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0;
}
static int crypto_rfc4543_decrypt(struct aead_request *req)
{
req = crypto_rfc4543_crypt(req, 0);
return crypto_aead_decrypt(req);
}
static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_aead *aead;
unsigned long align;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
ctx->child = aead;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
ALIGN(crypto_aead_reqsize(aead),
crypto_tfm_ctx_alignment()) +
align + 16;
return 0;
}
static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_aead(ctx->child);
}
static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct crypto_aead_spawn *spawn;
struct crypto_alg *alg;
const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
return ERR_PTR(err);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
ccm_name = crypto_attr_alg_name(tb[1]);
err = PTR_ERR(ccm_name);
if (IS_ERR(ccm_name))
return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return ERR_PTR(-ENOMEM);
spawn = crypto_instance_ctx(inst);
crypto_set_aead_spawn(spawn, inst);
err = crypto_grab_aead(spawn, ccm_name, 0,
crypto_requires_sync(algt->type, algt->mask));
if (err)
goto out_free_inst;
alg = crypto_aead_spawn_alg(spawn);
err = -EINVAL;
/* We only support 16-byte blocks. */
if (alg->cra_aead.ivsize != 16)
goto out_drop_alg;
/* Not a stream cipher? */
if (alg->cra_blocksize != 1)
goto out_drop_alg;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4543(%s)", alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_drop_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = 1;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_nivaead_type;
inst->alg.cra_aead.ivsize = 8;
inst->alg.cra_aead.maxauthsize = 16;
inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
inst->alg.cra_init = crypto_rfc4543_init_tfm;
inst->alg.cra_exit = crypto_rfc4543_exit_tfm;
inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;
inst->alg.cra_aead.geniv = "seqiv";
out:
return inst;
out_drop_alg:
crypto_drop_aead(spawn);
out_free_inst:
kfree(inst);
inst = ERR_PTR(err);
goto out;
}
static void crypto_rfc4543_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_rfc4543_tmpl = {
.name = "rfc4543",
.alloc = crypto_rfc4543_alloc,
.free = crypto_rfc4543_free,
.module = THIS_MODULE,
};
static int __init crypto_gcm_module_init(void) static int __init crypto_gcm_module_init(void)
{ {
int err; int err;
...@@ -1067,8 +1346,14 @@ static int __init crypto_gcm_module_init(void) ...@@ -1067,8 +1346,14 @@ static int __init crypto_gcm_module_init(void)
if (err) if (err)
goto out_undo_gcm; goto out_undo_gcm;
err = crypto_register_template(&crypto_rfc4543_tmpl);
if (err)
goto out_undo_rfc4106;
return 0; return 0;
out_undo_rfc4106:
crypto_unregister_template(&crypto_rfc4106_tmpl);
out_undo_gcm: out_undo_gcm:
crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_tmpl);
out_undo_base: out_undo_base:
...@@ -1081,6 +1366,7 @@ static int __init crypto_gcm_module_init(void) ...@@ -1081,6 +1366,7 @@ static int __init crypto_gcm_module_init(void)
static void __exit crypto_gcm_module_exit(void) static void __exit crypto_gcm_module_exit(void)
{ {
kfree(gcm_zeroes); kfree(gcm_zeroes);
crypto_unregister_template(&crypto_rfc4543_tmpl);
crypto_unregister_template(&crypto_rfc4106_tmpl); crypto_unregister_template(&crypto_rfc4106_tmpl);
crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_tmpl);
crypto_unregister_template(&crypto_gcm_base_tmpl); crypto_unregister_template(&crypto_gcm_base_tmpl);
...@@ -1094,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode"); ...@@ -1094,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode");
MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
MODULE_ALIAS("gcm_base"); MODULE_ALIAS("gcm_base");
MODULE_ALIAS("rfc4106"); MODULE_ALIAS("rfc4106");
MODULE_ALIAS("rfc4543");
...@@ -16,17 +16,13 @@ ...@@ -16,17 +16,13 @@
* *
*/ */
#include <crypto/internal/hash.h> #include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
#define F1(x, y, z) (z ^ (x & (y ^ z))) #define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y) #define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z) #define F3(x, y, z) (x ^ y ^ z)
...@@ -35,12 +31,6 @@ ...@@ -35,12 +31,6 @@
#define MD5STEP(f, w, x, y, z, in, s) \ #define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
struct md5_ctx {
u32 hash[MD5_HASH_WORDS];
u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
};
static void md5_transform(u32 *hash, u32 const *in) static void md5_transform(u32 *hash, u32 const *in)
{ {
u32 a, b, c, d; u32 a, b, c, d;
...@@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words) ...@@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
} }
} }
static inline void md5_transform_helper(struct md5_ctx *ctx) static inline void md5_transform_helper(struct md5_state *ctx)
{ {
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
md5_transform(ctx->hash, ctx->block); md5_transform(ctx->hash, ctx->block);
...@@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx) ...@@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
static int md5_init(struct shash_desc *desc) static int md5_init(struct shash_desc *desc)
{ {
struct md5_ctx *mctx = shash_desc_ctx(desc); struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = 0x67452301; mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89; mctx->hash[1] = 0xefcdab89;
...@@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc) ...@@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc)
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{ {
struct md5_ctx *mctx = shash_desc_ctx(desc); struct md5_state *mctx = shash_desc_ctx(desc);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len; mctx->byte_count += len;
...@@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) ...@@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int md5_final(struct shash_desc *desc, u8 *out) static int md5_final(struct shash_desc *desc, u8 *out)
{ {
struct md5_ctx *mctx = shash_desc_ctx(desc); struct md5_state *mctx = shash_desc_ctx(desc);
const unsigned int offset = mctx->byte_count & 0x3f; const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset; char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1); int padding = 56 - (offset + 1);
...@@ -220,12 +210,30 @@ static int md5_final(struct shash_desc *desc, u8 *out) ...@@ -220,12 +210,30 @@ static int md5_final(struct shash_desc *desc, u8 *out)
return 0; return 0;
} }
static int md5_export(struct shash_desc *desc, void *out)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(out, ctx, sizeof(*ctx));
return 0;
}
static int md5_import(struct shash_desc *desc, const void *in)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(ctx, in, sizeof(*ctx));
return 0;
}
static struct shash_alg alg = { static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE, .digestsize = MD5_DIGEST_SIZE,
.init = md5_init, .init = md5_init,
.update = md5_update, .update = md5_update,
.final = md5_final, .final = md5_final,
.descsize = sizeof(struct md5_ctx), .export = md5_export,
.import = md5_import,
.descsize = sizeof(struct md5_state),
.base = { .base = {
.cra_name = "md5", .cra_name = "md5",
.cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_flags = CRYPTO_ALG_TYPE_SHASH,
......
/*
* pcrypt - Parallel crypto wrapper.
*
* Copyright (C) 2009 secunet Security Networks AG
* Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <crypto/pcrypt.h>
static struct padata_instance *pcrypt_enc_padata;
static struct padata_instance *pcrypt_dec_padata;
static struct workqueue_struct *encwq;
static struct workqueue_struct *decwq;
struct pcrypt_instance_ctx {
struct crypto_spawn spawn;
unsigned int tfm_count;
};
struct pcrypt_aead_ctx {
struct crypto_aead *child;
unsigned int cb_cpu;
};
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
struct padata_instance *pinst)
{
unsigned int cpu_index, cpu, i;
cpu = *cb_cpu;
if (cpumask_test_cpu(cpu, cpu_active_mask))
goto out;
cpu_index = cpu % cpumask_weight(cpu_active_mask);
cpu = cpumask_first(cpu_active_mask);
for (i = 0; i < cpu_index; i++)
cpu = cpumask_next(cpu, cpu_active_mask);
*cb_cpu = cpu;
out:
return padata_do_parallel(pinst, padata, cpu);
}
static int pcrypt_aead_setkey(struct crypto_aead *parent,
const u8 *key, unsigned int keylen)
{
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
return crypto_aead_setkey(ctx->child, key, keylen);
}
static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
return crypto_aead_setauthsize(ctx->child, authsize);
}
static void pcrypt_aead_serial(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
aead_request_complete(req->base.data, padata->info);
}
static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
aead_request_complete(req->areq.base.data, padata->info);
}
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
struct pcrypt_request *preq = aead_request_ctx(req);
struct padata_priv *padata = pcrypt_request_padata(preq);
padata->info = err;
req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
padata_do_serial(padata);
}
static void pcrypt_aead_enc(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
padata->info = crypto_aead_encrypt(req);
if (padata->info == -EINPROGRESS)
return;
padata_do_serial(padata);
}
static int pcrypt_aead_encrypt(struct aead_request *req)
{
int err;
struct pcrypt_request *preq = aead_request_ctx(req);
struct aead_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_enc;
padata->serial = pcrypt_aead_serial;
aead_request_set_tfm(creq, ctx->child);
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
aead_request_set_assoc(creq, req->assoc, req->assoclen);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
if (err)
return err;
else
err = crypto_aead_encrypt(creq);
return err;
}
static void pcrypt_aead_dec(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
padata->info = crypto_aead_decrypt(req);
if (padata->info == -EINPROGRESS)
return;
padata_do_serial(padata);
}
static int pcrypt_aead_decrypt(struct aead_request *req)
{
int err;
struct pcrypt_request *preq = aead_request_ctx(req);
struct aead_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_dec;
padata->serial = pcrypt_aead_serial;
aead_request_set_tfm(creq, ctx->child);
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
aead_request_set_assoc(creq, req->assoc, req->assoclen);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
if (err)
return err;
else
err = crypto_aead_decrypt(creq);
return err;
}
static void pcrypt_aead_givenc(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
padata->info = crypto_aead_givencrypt(req);
if (padata->info == -EINPROGRESS)
return;
padata_do_serial(padata);
}
static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
int err;
struct aead_request *areq = &req->areq;
struct pcrypt_request *preq = aead_request_ctx(areq);
struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(areq);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_givenc;
padata->serial = pcrypt_aead_giv_serial;
aead_givcrypt_set_tfm(creq, ctx->child);
aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, areq);
aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
areq->cryptlen, areq->iv);
aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
aead_givcrypt_set_giv(creq, req->giv, req->seq);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
if (err)
return err;
else
err = crypto_aead_givencrypt(creq);
return err;
}
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
int cpu, cpu_index;
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_aead *cipher;
ictx->tfm_count++;
cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
ctx->cb_cpu = cpumask_first(cpu_active_mask);
for (cpu = 0; cpu < cpu_index; cpu++)
ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
+ sizeof(struct aead_givcrypt_request)
+ crypto_aead_reqsize(cipher);
return 0;
}
static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_aead(ctx->child);
}
static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
struct crypto_instance *inst;
struct pcrypt_instance_ctx *ctx;
int err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst) {
inst = ERR_PTR(-ENOMEM);
goto out;
}
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
ctx = crypto_instance_ctx(inst);
err = crypto_init_spawn(&ctx->spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK);
if (err)
goto out_free_inst;
inst->alg.cra_priority = alg->cra_priority + 100;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
out:
return inst;
out_free_inst:
kfree(inst);
inst = ERR_PTR(err);
goto out;
}
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
alg = crypto_get_attr_alg(tb, algt->type,
(algt->mask & CRYPTO_ALG_TYPE_MASK));
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = pcrypt_alloc_instance(alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
inst->alg.cra_init = pcrypt_aead_init_tfm;
inst->alg.cra_exit = pcrypt_aead_exit_tfm;
inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
return pcrypt_alloc_aead(tb);
}
return ERR_PTR(-EINVAL);
}
static void pcrypt_free(struct crypto_instance *inst)
{
struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_spawn(&ctx->spawn);
kfree(inst);
}
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.alloc = pcrypt_alloc,
.free = pcrypt_free,
.module = THIS_MODULE,
};
static int __init pcrypt_init(void)
{
encwq = create_workqueue("pencrypt");
if (!encwq)
goto err;
decwq = create_workqueue("pdecrypt");
if (!decwq)
goto err_destroy_encwq;
pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
if (!pcrypt_enc_padata)
goto err_destroy_decwq;
pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
if (!pcrypt_dec_padata)
goto err_free_padata;
padata_start(pcrypt_enc_padata);
padata_start(pcrypt_dec_padata);
return crypto_register_template(&pcrypt_tmpl);
err_free_padata:
padata_free(pcrypt_enc_padata);
err_destroy_decwq:
destroy_workqueue(decwq);
err_destroy_encwq:
destroy_workqueue(encwq);
err:
return -ENOMEM;
}
static void __exit pcrypt_exit(void)
{
padata_stop(pcrypt_enc_padata);
padata_stop(pcrypt_dec_padata);
destroy_workqueue(encwq);
destroy_workqueue(decwq);
padata_free(pcrypt_enc_padata);
padata_free(pcrypt_dec_padata);
crypto_unregister_template(&pcrypt_tmpl);
}
module_init(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
...@@ -1477,9 +1477,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver, ...@@ -1477,9 +1477,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
return err; return err;
} }
static int alg_test_null(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
return 0;
}
/* Please keep this list sorted by algorithm name. */ /* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = { static const struct alg_test_desc alg_test_descs[] = {
{ {
.alg = "__driver-cbc-aes-aesni",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-ecb-aes-aesni",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__ghash-pclmulqdqni",
.test = alg_test_null,
.suite = {
.hash = {
.vecs = NULL,
.count = 0
}
}
}, {
.alg = "ansi_cprng", .alg = "ansi_cprng",
.test = alg_test_cprng, .test = alg_test_cprng,
.fips_allowed = 1, .fips_allowed = 1,
...@@ -1622,6 +1667,30 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1622,6 +1667,30 @@ static const struct alg_test_desc alg_test_descs[] = {
.count = CRC32C_TEST_VECTORS .count = CRC32C_TEST_VECTORS
} }
} }
}, {
.alg = "cryptd(__driver-ecb-aes-aesni)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "cryptd(__ghash-pclmulqdqni)",
.test = alg_test_null,
.suite = {
.hash = {
.vecs = NULL,
.count = 0
}
}
}, { }, {
.alg = "ctr(aes)", .alg = "ctr(aes)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
...@@ -1668,6 +1737,21 @@ static const struct alg_test_desc alg_test_descs[] = { ...@@ -1668,6 +1737,21 @@ static const struct alg_test_desc alg_test_descs[] = {
} }
} }
} }
}, {
.alg = "ecb(__aes-aesni)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, { }, {
.alg = "ecb(aes)", .alg = "ecb(aes)",
.test = alg_test_skcipher, .test = alg_test_skcipher,
......
...@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA ...@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA
module will be called mxc-rnga. module will be called mxc-rnga.
If unsure, say Y. If unsure, say Y.
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on HW_RANDOM && PLAT_NOMADIK
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
To compile this driver as a module, choose M here: the
module will be called nomadik-rng.
If unsure, say Y.
...@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o ...@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
/*
* Nomadik RNG support
* Copyright 2009 Alessandro Rubini
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
#include <linux/io.h>
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
void __iomem *base = (void __iomem *)rng->priv;
/*
* The register is 32 bits and gives 16 random bits (low half).
* A subsequent read will delay the core for 400ns, so we just read
* once and accept the very unlikely very small delay, even if wait==0.
*/
*(u16 *)data = __raw_readl(base + 8) & 0xffff;
return 2;
}
/* we have at most one RNG per machine, granted */
static struct hwrng nmk_rng = {
.name = "nomadik",
.read = nmk_rng_read,
};
static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
{
void __iomem *base;
int ret;
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
return ret;
ret = -ENOMEM;
base = ioremap(dev->res.start, resource_size(&dev->res));
if (!base)
goto out_release;
nmk_rng.priv = (unsigned long)base;
ret = hwrng_register(&nmk_rng);
if (ret)
goto out_unmap;
return 0;
out_unmap:
iounmap(base);
out_release:
amba_release_regions(dev);
return ret;
}
static int nmk_rng_remove(struct amba_device *dev)
{
void __iomem *base = (void __iomem *)nmk_rng.priv;
hwrng_unregister(&nmk_rng);
iounmap(base);
amba_release_regions(dev);
return 0;
}
static struct amba_id nmk_rng_ids[] = {
{
.id = 0x000805e1,
.mask = 0x000fffff, /* top bits are rev and cfg: accept all */
},
{0, 0},
};
static struct amba_driver nmk_rng_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "rng",
},
.probe = nmk_rng_probe,
.remove = nmk_rng_remove,
.id_table = nmk_rng_ids,
};
static int __init nmk_rng_init(void)
{
return amba_driver_register(&nmk_rng_driver);
}
static void __devexit nmk_rng_exit(void)
{
amba_driver_unregister(&nmk_rng_driver);
}
module_init(nmk_rng_init);
module_exit(nmk_rng_exit);
MODULE_LICENSE("GPL");
...@@ -1274,7 +1274,7 @@ static int __exit crypto4xx_remove(struct of_device *ofdev) ...@@ -1274,7 +1274,7 @@ static int __exit crypto4xx_remove(struct of_device *ofdev)
return 0; return 0;
} }
static struct of_device_id crypto4xx_match[] = { static const struct of_device_id crypto4xx_match[] = {
{ .compatible = "amcc,ppc4xx-crypto",}, { .compatible = "amcc,ppc4xx-crypto",},
{ }, { },
}; };
......
...@@ -135,13 +135,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, ...@@ -135,13 +135,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
/* /*
* The requested key size is not supported by HW, do a fallback * The requested key size is not supported by HW, do a fallback
*/ */
op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(op->fallback.cip, key, len); ret = crypto_cipher_setkey(op->fallback.cip, key, len);
if (ret) { if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK); tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
} }
return ret; return ret;
} }
...@@ -263,7 +263,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm) ...@@ -263,7 +263,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
if (IS_ERR(op->fallback.cip)) { if (IS_ERR(op->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name); printk(KERN_ERR "Error allocating fallback algo %s\n", name);
return PTR_ERR(op->fallback.blk); return PTR_ERR(op->fallback.cip);
} }
return 0; return 0;
......
...@@ -1958,7 +1958,7 @@ static int talitos_probe(struct of_device *ofdev, ...@@ -1958,7 +1958,7 @@ static int talitos_probe(struct of_device *ofdev,
return err; return err;
} }
static struct of_device_id talitos_match[] = { static const struct of_device_id talitos_match[] = {
{ {
.compatible = "fsl,sec2.0", .compatible = "fsl,sec2.0",
}, },
......
#ifndef _CRYPTO_MD5_H
#define _CRYPTO_MD5_H
#include <linux/types.h>
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
struct md5_state {
u32 hash[MD5_HASH_WORDS];
u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
};
#endif
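A hedged sketch (not from the patch) of what the new export/import hooks make possible: suspending and resuming a partial MD5 computation through the shash interface. The helper name md5_export_example and its arguments are invented; the saved state is the struct md5_state defined above.

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <linux/err.h>
#include <linux/slab.h>

static int md5_export_example(const u8 *part1, unsigned int len1,
			      const u8 *part2, unsigned int len2, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	struct md5_state saved;
	int err;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, part1, len1);
	if (!err)
		err = crypto_shash_export(desc, &saved);	/* md5_export() */
	if (!err)
		err = crypto_shash_import(desc, &saved);	/* md5_import() */
	if (!err)
		err = crypto_shash_update(desc, part2, len2);
	if (!err)
		err = crypto_shash_final(desc, out);	/* MD5_DIGEST_SIZE bytes */

	kzfree(desc);
	crypto_free_shash(tfm);
	return err;
}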
/*
* pcrypt - Parallel crypto engine.
*
* Copyright (C) 2009 secunet Security Networks AG
* Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/padata.h>
struct pcrypt_request {
struct padata_priv padata;
void *data;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
{
return req->__ctx;
}
static inline
struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
{
return &req->padata;
}
static inline
struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata)
{
return container_of(padata, struct pcrypt_request, padata);
}
#endif
/*
* padata.h - header for the padata parallelization interface
*
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef PADATA_H
#define PADATA_H
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
int seq_nr;
int info;
void (*parallel)(struct padata_priv *padata);
void (*serial)(struct padata_priv *padata);
};
struct padata_list {
struct list_head list;
spinlock_t lock;
};
struct padata_queue {
struct padata_list parallel;
struct padata_list reorder;
struct padata_list serial;
struct work_struct pwork;
struct work_struct swork;
struct parallel_data *pd;
atomic_t num_obj;
int cpu_index;
};
struct parallel_data {
struct padata_instance *pinst;
struct padata_queue *queue;
atomic_t seq_nr;
atomic_t reorder_objects;
atomic_t refcnt;
unsigned int max_seq_nr;
cpumask_var_t cpumask;
spinlock_t lock;
};
struct padata_instance {
struct notifier_block cpu_notifier;
struct workqueue_struct *wq;
struct parallel_data *pd;
cpumask_var_t cpumask;
struct mutex lock;
u8 flags;
#define PADATA_INIT 1
#define PADATA_RESET 2
};
extern struct padata_instance *padata_alloc(const struct cpumask *cpumask,
struct workqueue_struct *wq);
extern void padata_free(struct padata_instance *pinst);
extern int padata_do_parallel(struct padata_instance *pinst,
struct padata_priv *padata, int cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst,
cpumask_var_t cpumask);
extern int padata_add_cpu(struct padata_instance *pinst, int cpu);
extern int padata_remove_cpu(struct padata_instance *pinst, int cpu);
extern void padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
#endif
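To make the interface concrete, here is a minimal, hypothetical user of the new API; all names such as my_work, my_parallel, my_serial and my_submit are invented and not part of the patch. It assumes a padata_instance already created with padata_alloc() and started with padata_start(), and follows the rules documented at padata_do_parallel() and padata_do_serial() in kernel/padata.c below: both callbacks run with BHs off, cb_cpu must lie in the instance's cpumask, and every parallelized object must eventually be handed to padata_do_serial(). pcrypt is the real in-tree user.

#include <linux/padata.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_work {
	struct padata_priv padata;
	/* caller data goes here */
};

static void my_parallel(struct padata_priv *padata)
{
	/* runs on some cpu of the instance's cpumask, BHs disabled;
	 * do the expensive work here, then hand the object back */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	/* runs on the cb_cpu passed to padata_do_parallel(), in submission order */
	kfree(container_of(padata, struct my_work, padata));
}

static int my_submit(struct padata_instance *pinst, int cb_cpu)
{
	struct my_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;
	w->padata.parallel = my_parallel;
	w->padata.serial = my_serial;
	/* returns -EINPROGRESS once the object has been queued */
	return padata_do_parallel(pinst, &w->padata, cb_cpu);
}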
...@@ -315,6 +315,7 @@ struct sadb_x_kmaddress { ...@@ -315,6 +315,7 @@ struct sadb_x_kmaddress {
#define SADB_X_EALG_AES_GCM_ICV12 19 #define SADB_X_EALG_AES_GCM_ICV12 19
#define SADB_X_EALG_AES_GCM_ICV16 20 #define SADB_X_EALG_AES_GCM_ICV16 20
#define SADB_X_EALG_CAMELLIACBC 22 #define SADB_X_EALG_CAMELLIACBC 22
#define SADB_X_EALG_NULL_AES_GMAC 23
#define SADB_EALG_MAX 253 /* last EALG */ #define SADB_EALG_MAX 253 /* last EALG */
/* private allocations should use 249-255 (RFC2407) */ /* private allocations should use 249-255 (RFC2407) */
#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */ #define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */
......
...@@ -1262,4 +1262,8 @@ source "block/Kconfig" ...@@ -1262,4 +1262,8 @@ source "block/Kconfig"
config PREEMPT_NOTIFIERS config PREEMPT_NOTIFIERS
bool bool
config PADATA
depends on SMP
bool
source "kernel/Kconfig.locks" source "kernel/Kconfig.locks"
...@@ -100,6 +100,7 @@ obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o ...@@ -100,6 +100,7 @@ obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
obj-$(CONFIG_PADATA) += padata.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
......
/*
* padata.c - generic interface to process data streams in parallel
*
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#define MAX_SEQ_NR INT_MAX - NR_CPUS
#define MAX_OBJ_NUM 10000 * NR_CPUS
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
target_cpu = cpumask_first(pd->cpumask);
for (cpu = 0; cpu < cpu_index; cpu++)
target_cpu = cpumask_next(target_cpu, pd->cpumask);
return target_cpu;
}
static int padata_cpu_hash(struct padata_priv *padata)
{
int cpu_index;
struct parallel_data *pd;
pd = padata->pd;
/*
* Hash the sequence numbers to the cpus by taking
* seq_nr mod. number of cpus in use.
*/
cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);
return padata_index_to_cpu(pd, cpu_index);
}
static void padata_parallel_worker(struct work_struct *work)
{
struct padata_queue *queue;
struct parallel_data *pd;
struct padata_instance *pinst;
LIST_HEAD(local_list);
local_bh_disable();
queue = container_of(work, struct padata_queue, pwork);
pd = queue->pd;
pinst = pd->pinst;
spin_lock(&queue->parallel.lock);
list_replace_init(&queue->parallel.list, &local_list);
spin_unlock(&queue->parallel.lock);
while (!list_empty(&local_list)) {
struct padata_priv *padata;
padata = list_entry(local_list.next,
struct padata_priv, list);
list_del_init(&padata->list);
padata->parallel(padata);
}
local_bh_enable();
}
/*
* padata_do_parallel - padata parallelization function
*
* @pinst: padata instance
* @padata: object to be parallelized
* @cb_cpu: cpu the serialization callback function will run on,
* must be in the cpumask of padata.
*
* The parallelization callback function will run with BHs off.
* Note: Every object which is parallelized by padata_do_parallel
* must be seen by padata_do_serial.
*/
int padata_do_parallel(struct padata_instance *pinst,
struct padata_priv *padata, int cb_cpu)
{
int target_cpu, err;
struct padata_queue *queue;
struct parallel_data *pd;
rcu_read_lock_bh();
pd = rcu_dereference(pinst->pd);
err = 0;
if (!(pinst->flags & PADATA_INIT))
goto out;
err = -EBUSY;
if ((pinst->flags & PADATA_RESET))
goto out;
if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
goto out;
err = -EINVAL;
if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
goto out;
err = -EINPROGRESS;
atomic_inc(&pd->refcnt);
padata->pd = pd;
padata->cb_cpu = cb_cpu;
if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
atomic_set(&pd->seq_nr, -1);
padata->seq_nr = atomic_inc_return(&pd->seq_nr);
target_cpu = padata_cpu_hash(padata);
queue = per_cpu_ptr(pd->queue, target_cpu);
spin_lock(&queue->parallel.lock);
list_add_tail(&padata->list, &queue->parallel.list);
spin_unlock(&queue->parallel.lock);
queue_work_on(target_cpu, pinst->wq, &queue->pwork);
out:
rcu_read_unlock_bh();
return err;
}
EXPORT_SYMBOL(padata_do_parallel);
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
int cpu, num_cpus, empty, calc_seq_nr;
int seq_nr, next_nr, overrun, next_overrun;
struct padata_queue *queue, *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
empty = 0;
next_nr = -1;
next_overrun = 0;
next_queue = NULL;
num_cpus = cpumask_weight(pd->cpumask);
for_each_cpu(cpu, pd->cpumask) {
queue = per_cpu_ptr(pd->queue, cpu);
reorder = &queue->reorder;
/*
* Calculate the seq_nr of the object that should be
* next in this queue.
*/
overrun = 0;
calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
+ queue->cpu_index;
if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
overrun = 1;
}
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
seq_nr = padata->seq_nr;
BUG_ON(calc_seq_nr != seq_nr);
} else {
seq_nr = calc_seq_nr;
empty++;
}
if (next_nr < 0 || seq_nr < next_nr
|| (next_overrun && !overrun)) {
next_nr = seq_nr;
next_overrun = overrun;
next_queue = queue;
}
}
padata = NULL;
if (empty == num_cpus)
goto out;
reorder = &next_queue->reorder;
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
if (unlikely(next_overrun)) {
for_each_cpu(cpu, pd->cpumask) {
queue = per_cpu_ptr(pd->queue, cpu);
atomic_set(&queue->num_obj, 0);
}
}
spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
spin_unlock(&reorder->lock);
atomic_inc(&next_queue->num_obj);
goto out;
}
if (next_nr % num_cpus == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
goto out;
}
padata = ERR_PTR(-EINPROGRESS);
out:
return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
struct padata_priv *padata;
struct padata_queue *queue;
struct padata_instance *pinst = pd->pinst;
try_again:
if (!spin_trylock_bh(&pd->lock))
goto out;
while (1) {
padata = padata_get_next(pd);
if (!padata || PTR_ERR(padata) == -EINPROGRESS)
break;
if (PTR_ERR(padata) == -ENODATA) {
spin_unlock_bh(&pd->lock);
goto out;
}
queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
spin_lock(&queue->serial.lock);
list_add_tail(&padata->list, &queue->serial.list);
spin_unlock(&queue->serial.lock);
queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
}
spin_unlock_bh(&pd->lock);
if (atomic_read(&pd->reorder_objects))
goto try_again;
out:
return;
}
static void padata_serial_worker(struct work_struct *work)
{
struct padata_queue *queue;
struct parallel_data *pd;
LIST_HEAD(local_list);
local_bh_disable();
queue = container_of(work, struct padata_queue, swork);
pd = queue->pd;
spin_lock(&queue->serial.lock);
list_replace_init(&queue->serial.list, &local_list);
spin_unlock(&queue->serial.lock);
while (!list_empty(&local_list)) {
struct padata_priv *padata;
padata = list_entry(local_list.next,
struct padata_priv, list);
list_del_init(&padata->list);
padata->serial(padata);
atomic_dec(&pd->refcnt);
}
local_bh_enable();
}
/*
* padata_do_serial - padata serialization function
*
* @padata: object to be serialized.
*
* padata_do_serial must be called for every parallelized object.
* The serialization callback function will run with BHs off.
*/
void padata_do_serial(struct padata_priv *padata)
{
int cpu;
struct padata_queue *queue;
struct parallel_data *pd;
pd = padata->pd;
cpu = get_cpu();
queue = per_cpu_ptr(pd->queue, cpu);
spin_lock(&queue->reorder.lock);
atomic_inc(&pd->reorder_objects);
list_add_tail(&padata->list, &queue->reorder.list);
spin_unlock(&queue->reorder.lock);
put_cpu();
padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
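/*
 * Editor's sketch, not part of this patch: the two callbacks used by
 * the submission example above.  do_expensive_work() and
 * complete_request() are hypothetical placeholders.
 */
static void my_parallel(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request, padata);

	do_expensive_work(req);		/* runs with BHs off on a parallel cpu */

	/* every parallelized object must end up in padata_do_serial() */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request, padata);

	/* runs on the cb_cpu given to padata_do_parallel(), in submission order */
	complete_request(req);
}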
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
const struct cpumask *cpumask)
{
int cpu, cpu_index, num_cpus;
struct padata_queue *queue;
struct parallel_data *pd;
cpu_index = 0;
pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
if (!pd)
goto err;
pd->queue = alloc_percpu(struct padata_queue);
if (!pd->queue)
goto err_free_pd;
if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
goto err_free_queue;
for_each_possible_cpu(cpu) {
queue = per_cpu_ptr(pd->queue, cpu);
queue->pd = pd;
if (cpumask_test_cpu(cpu, cpumask)
&& cpumask_test_cpu(cpu, cpu_active_mask)) {
queue->cpu_index = cpu_index;
cpu_index++;
} else
queue->cpu_index = -1;
INIT_LIST_HEAD(&queue->reorder.list);
INIT_LIST_HEAD(&queue->parallel.list);
INIT_LIST_HEAD(&queue->serial.list);
spin_lock_init(&queue->reorder.lock);
spin_lock_init(&queue->parallel.lock);
spin_lock_init(&queue->serial.lock);
INIT_WORK(&queue->pwork, padata_parallel_worker);
INIT_WORK(&queue->swork, padata_serial_worker);
atomic_set(&queue->num_obj, 0);
}
cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
num_cpus = cpumask_weight(pd->cpumask);
pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 0);
pd->pinst = pinst;
spin_lock_init(&pd->lock);
return pd;
err_free_queue:
free_percpu(pd->queue);
err_free_pd:
kfree(pd);
err:
return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
free_cpumask_var(pd->cpumask);
free_percpu(pd->queue);
kfree(pd);
}
static void padata_replace(struct padata_instance *pinst,
struct parallel_data *pd_new)
{
struct parallel_data *pd_old = pinst->pd;
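/*
 * Editor's note: PADATA_RESET makes padata_do_parallel() refuse new
 * objects with -EBUSY while the switch is in progress.  Once the new
 * parallel_data is published and an RCU grace period has passed, wait
 * for the objects still referencing the old one to drain before it is
 * freed.
 */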
pinst->flags |= PADATA_RESET;
rcu_assign_pointer(pinst->pd, pd_new);
synchronize_rcu();
while (atomic_read(&pd_old->refcnt) != 0)
yield();
flush_workqueue(pinst->wq);
padata_free_pd(pd_old);
pinst->flags &= ~PADATA_RESET;
}
/*
* padata_set_cpumask - set the cpumask that padata should use
*
* @pinst: padata instance
* @cpumask: the cpumask to use
*/
int padata_set_cpumask(struct padata_instance *pinst,
cpumask_var_t cpumask)
{
struct parallel_data *pd;
int err = 0;
might_sleep();
mutex_lock(&pinst->lock);
pd = padata_alloc_pd(pinst, cpumask);
if (!pd) {
err = -ENOMEM;
goto out;
}
cpumask_copy(pinst->cpumask, cpumask);
padata_replace(pinst, pd);
out:
mutex_unlock(&pinst->lock);
return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
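/*
 * Editor's sketch, not part of this patch: restricting an instance to
 * cpus 0 and 1.  padata_set_cpumask() may sleep, so this has to run
 * in process context.
 */
static int my_restrict_cpus(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	err = padata_set_cpumask(pinst, mask);

	free_cpumask_var(mask);
	return err;
}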
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
struct parallel_data *pd;
if (cpumask_test_cpu(cpu, cpu_active_mask)) {
pd = padata_alloc_pd(pinst, pinst->cpumask);
if (!pd)
return -ENOMEM;
padata_replace(pinst, pd);
}
return 0;
}
/*
* padata_add_cpu - add a cpu to the padata cpumask
*
* @pinst: padata instance
* @cpu: cpu to add
*/
int padata_add_cpu(struct padata_instance *pinst, int cpu)
{
int err;
might_sleep();
mutex_lock(&pinst->lock);
cpumask_set_cpu(cpu, pinst->cpumask);
err = __padata_add_cpu(pinst, cpu);
mutex_unlock(&pinst->lock);
return err;
}
EXPORT_SYMBOL(padata_add_cpu);
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
struct parallel_data *pd;
if (cpumask_test_cpu(cpu, cpu_online_mask)) {
pd = padata_alloc_pd(pinst, pinst->cpumask);
if (!pd)
return -ENOMEM;
padata_replace(pinst, pd);
}
return 0;
}
/*
* padata_remove_cpu - remove a cpu from the padata cpumask
*
* @pinst: padata instance
* @cpu: cpu to remove
*/
int padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
int err;
might_sleep();
mutex_lock(&pinst->lock);
cpumask_clear_cpu(cpu, pinst->cpumask);
err = __padata_remove_cpu(pinst, cpu);
mutex_unlock(&pinst->lock);
return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
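/*
 * Editor's sketch, not part of this patch: shifting one cpu's share of
 * the work to another cpu.  Both helpers may sleep and rebuild the
 * internal parallel_data.
 */
static int my_move_cpu(struct padata_instance *pinst, int from, int to)
{
	int err;

	err = padata_add_cpu(pinst, to);
	if (err)
		return err;

	return padata_remove_cpu(pinst, from);
}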
/*
* padata_start - start the parallel processing
*
* @pinst: padata instance to start
*/
void padata_start(struct padata_instance *pinst)
{
might_sleep();
mutex_lock(&pinst->lock);
pinst->flags |= PADATA_INIT;
mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_start);
/*
* padata_stop - stop the parallel processing
*
* @pinst: padata instance to stop
*/
void padata_stop(struct padata_instance *pinst)
{
might_sleep();
mutex_lock(&pinst->lock);
pinst->flags &= ~PADATA_INIT;
mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int err;
struct padata_instance *pinst;
int cpu = (unsigned long)hcpu;
pinst = container_of(nfb, struct padata_instance, cpu_notifier);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
if (!cpumask_test_cpu(cpu, pinst->cpumask))
break;
mutex_lock(&pinst->lock);
err = __padata_add_cpu(pinst, cpu);
mutex_unlock(&pinst->lock);
if (err)
return NOTIFY_BAD;
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (!cpumask_test_cpu(cpu, pinst->cpumask))
break;
mutex_lock(&pinst->lock);
err = __padata_remove_cpu(pinst, cpu);
mutex_unlock(&pinst->lock);
if (err)
return NOTIFY_BAD;
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
if (!cpumask_test_cpu(cpu, pinst->cpumask))
break;
mutex_lock(&pinst->lock);
__padata_remove_cpu(pinst, cpu);
mutex_unlock(&pinst->lock);
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
if (!cpumask_test_cpu(cpu, pinst->cpumask))
break;
mutex_lock(&pinst->lock);
__padata_add_cpu(pinst, cpu);
mutex_unlock(&pinst->lock);
}
return NOTIFY_OK;
}
/*
* padata_alloc - allocate and initialize a padata instance
*
* @cpumask: cpumask that padata uses for parallelization
* @wq: workqueue to use for the allocated padata instance
*/
struct padata_instance *padata_alloc(const struct cpumask *cpumask,
struct workqueue_struct *wq)
{
int err;
struct padata_instance *pinst;
struct parallel_data *pd;
pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
if (!pinst)
goto err;
pd = padata_alloc_pd(pinst, cpumask);
if (!pd)
goto err_free_inst;
rcu_assign_pointer(pinst->pd, pd);
pinst->wq = wq;
cpumask_copy(pinst->cpumask, cpumask);
pinst->flags = 0;
pinst->cpu_notifier.notifier_call = padata_cpu_callback;
pinst->cpu_notifier.priority = 0;
err = register_hotcpu_notifier(&pinst->cpu_notifier);
if (err)
goto err_free_pd;
mutex_init(&pinst->lock);
return pinst;
err_free_pd:
padata_free_pd(pd);
err_free_inst:
kfree(pinst);
err:
return NULL;
}
EXPORT_SYMBOL(padata_alloc);
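/*
 * Editor's sketch, not part of this patch: bringing up an instance.
 * The caller owns the workqueue and must keep it alive for the
 * lifetime of the instance; "my_wq" is hypothetical.
 */
static struct padata_instance *my_setup(struct workqueue_struct *my_wq)
{
	struct padata_instance *pinst;

	pinst = padata_alloc(cpu_possible_mask, my_wq);
	if (!pinst)
		return NULL;

	/* padata_do_parallel() queues nothing until the instance is started */
	padata_start(pinst);

	return pinst;
}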
/*
* padata_free - free a padata instance
*
* @pinst: padata instance to free
*/
void padata_free(struct padata_instance *pinst)
{
padata_stop(pinst);
synchronize_rcu();
while (atomic_read(&pinst->pd->refcnt) != 0)
yield();
unregister_hotcpu_notifier(&pinst->cpu_notifier);
padata_free_pd(pinst->pd);
kfree(pinst);
}
EXPORT_SYMBOL(padata_free);
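/*
 * Editor's sketch, not part of this patch: tearing the instance down.
 * padata_free() stops the instance and waits for outstanding objects
 * itself, so only the workqueue remains to be released afterwards.
 */
static void my_teardown(struct padata_instance *pinst,
			struct workqueue_struct *my_wq)
{
	padata_free(pinst);
	destroy_workqueue(my_wq);
}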
...@@ -125,6 +125,22 @@ static struct xfrm_algo_desc aead_list[] = { ...@@ -125,6 +125,22 @@ static struct xfrm_algo_desc aead_list[] = {
.sadb_alg_maxbits = 256 .sadb_alg_maxbits = 256
} }
}, },
{
.name = "rfc4543(gcm(aes))",
.uinfo = {
.aead = {
.icv_truncbits = 128,
}
},
.desc = {
.sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
.sadb_alg_ivlen = 8,
.sadb_alg_minbits = 128,
.sadb_alg_maxbits = 256
}
},
}; };
static struct xfrm_algo_desc aalg_list[] = { static struct xfrm_algo_desc aalg_list[] = {
......