Commit dc47d381 authored by Linus Torvalds

Merge git://github.com/herbertx/crypto

* git://github.com/herbertx/crypto: (48 commits)
  crypto: user - Depend on NET instead of selecting it
  crypto: user - Add dependency on NET
  crypto: talitos - handle descriptor not found in error path
  crypto: user - Initialise match in crypto_alg_match
  crypto: testmgr - add twofish tests
  crypto: testmgr - add blowfish test-vectors
  crypto: Make hifn_795x build depend on !ARCH_DMA_ADDR_T_64BIT
  crypto: twofish-x86_64-3way - fix ctr blocksize to 1
  crypto: blowfish-x86_64 - fix ctr blocksize to 1
  crypto: whirlpool - count rounds from 0
  crypto: Add userspace report for compress type algorithms
  crypto: Add userspace report for cipher type algorithms
  crypto: Add userspace report for rng type algorithms
  crypto: Add userspace report for pcompress type algorithms
  crypto: Add userspace report for nivaead type algorithms
  crypto: Add userspace report for aead type algorithms
  crypto: Add userspace report for givcipher type algorithms
  crypto: Add userspace report for ablkcipher type algorithms
  crypto: Add userspace report for blkcipher type algorithms
  crypto: Add userspace report for ahash type algorithms
  ...
parents f6d90b4f 5db017aa
Picochip picoXcell SPAcc (Security Protocol Accelerator) bindings
Picochip picoXcell devices contain crypto offload engines that may be used for
IPSEC and femtocell layer 2 ciphering.
Required properties:
- compatible : "picochip,spacc-ipsec" for the IPSEC offload engine, or
  "picochip,spacc-l2" for the femtocell layer 2 ciphering engine.
- reg : Offset and length of the register set for this device.
- interrupt-parent : The interrupt controller that controls the SPAcc
  interrupt.
- interrupts : The interrupt line from the SPAcc.
- ref-clock : The input clock that drives the SPAcc.
Example SPAcc node:

spacc@10000 {
	compatible = "picochip,spacc-ipsec";
	reg = <0x100000 0x10000>;
	interrupt-parent = <&vic0>;
	interrupts = <24>;
	ref-clock = <&ipsec_clk>, "ref";
};
@@ -7,21 +7,33 @@ obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
aes-i586-y := aes-i586-asm_32.o aes_glue.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
# enable AVX support only when $(AS) can actually assemble the instructions
ifeq ($(call as-instr,vpxor %xmm0$(comma)%xmm1$(comma)%xmm2,yes,no),yes)
AFLAGS_sha1_ssse3_asm.o += -DSHA1_ENABLE_AVX_SUPPORT
CFLAGS_sha1_ssse3_glue.o += -DSHA1_ENABLE_AVX_SUPPORT
endif
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
@@ -4,6 +4,7 @@
*/
#include <crypto/aes.h>
#include <asm/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
......
/*
* Blowfish Cipher Algorithm (x86_64)
*
* Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "blowfish-x86_64-asm.S"
.text
/* structure of crypto context */
#define p 0
#define s0 ((16 + 2) * 4)
#define s1 ((16 + 2 + (1 * 256)) * 4)
#define s2 ((16 + 2 + (2 * 256)) * 4)
#define s3 ((16 + 2 + (3 * 256)) * 4)
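These offsets mirror the layout of the Blowfish context structure. A standalone sketch (assuming the P-array-then-S-boxes layout implied by the numbers above; the kernel may declare the S-boxes as one flat array) that checks the arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* hypothetical mirror of the kernel's struct bf_ctx layout */
struct bf_ctx {
	uint32_t p[16 + 2];	/* subkeys: 16 rounds + 2 final xors */
	uint32_t s[4][256];	/* four 256-entry S-boxes */
};

int main(void)
{
	/* the offsets the #defines above hard-code */
	assert(offsetof(struct bf_ctx, p) == 0);
	assert(offsetof(struct bf_ctx, s[0]) == (16 + 2) * 4);
	assert(offsetof(struct bf_ctx, s[1]) == (16 + 2 + 1 * 256) * 4);
	assert(offsetof(struct bf_ctx, s[2]) == (16 + 2 + 2 * 256) * 4);
	assert(offsetof(struct bf_ctx, s[3]) == (16 + 2 + 3 * 256) * 4);
	return 0;
}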
/* register macros */
#define CTX %rdi
#define RIO %rsi
#define RX0 %rax
#define RX1 %rbx
#define RX2 %rcx
#define RX3 %rdx
#define RX0d %eax
#define RX1d %ebx
#define RX2d %ecx
#define RX3d %edx
#define RX0bl %al
#define RX1bl %bl
#define RX2bl %cl
#define RX3bl %dl
#define RX0bh %ah
#define RX1bh %bh
#define RX2bh %ch
#define RX3bh %dh
#define RT0 %rbp
#define RT1 %rsi
#define RT2 %r8
#define RT3 %r9
#define RT0d %ebp
#define RT1d %esi
#define RT2d %r8d
#define RT3d %r9d
#define RKEY %r10
/***********************************************************************
* 1-way blowfish
***********************************************************************/
#define F() \
rorq $16, RX0; \
movzbl RX0bh, RT0d; \
movzbl RX0bl, RT1d; \
rolq $16, RX0; \
movl s0(CTX,RT0,4), RT0d; \
addl s1(CTX,RT1,4), RT0d; \
movzbl RX0bh, RT1d; \
movzbl RX0bl, RT2d; \
rolq $32, RX0; \
xorl s2(CTX,RT1,4), RT0d; \
addl s3(CTX,RT2,4), RT0d; \
xorq RT0, RX0;
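In scalar C, the operation F() implements is the standard Blowfish round function; the macro's rotates and byte-register tricks merely extract the four bytes without extra shifts. A sketch (using the struct bf_ctx layout sketched earlier):

/* standard Blowfish F: x is split into bytes a,b,c,d, MSB first */
static uint32_t blowfish_F(const struct bf_ctx *ctx, uint32_t x)
{
	uint32_t a = (x >> 24) & 0xff;
	uint32_t b = (x >> 16) & 0xff;
	uint32_t c = (x >> 8) & 0xff;
	uint32_t d = x & 0xff;

	return ((ctx->s[0][a] + ctx->s[1][b]) ^ ctx->s[2][c]) + ctx->s[3][d];
}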
#define add_roundkey_enc(n) \
xorq p+4*(n)(CTX), RX0;
#define round_enc(n) \
add_roundkey_enc(n); \
\
F(); \
F();
#define add_roundkey_dec(n) \
movq p+4*(n-1)(CTX), RT0; \
rorq $32, RT0; \
xorq RT0, RX0;
#define round_dec(n) \
add_roundkey_dec(n); \
\
F(); \
F();
#define read_block() \
movq (RIO), RX0; \
rorq $32, RX0; \
bswapq RX0;
#define write_block() \
bswapq RX0; \
movq RX0, (RIO);
#define xor_block() \
bswapq RX0; \
xorq RX0, (RIO);
.align 8
.global __blowfish_enc_blk
.type __blowfish_enc_blk,@function;
__blowfish_enc_blk:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
movq %rbp, %r11;
movq %rsi, %r10;
movq %rdx, RIO;
read_block();
round_enc(0);
round_enc(2);
round_enc(4);
round_enc(6);
round_enc(8);
round_enc(10);
round_enc(12);
round_enc(14);
add_roundkey_enc(16);
movq %r11, %rbp;
movq %r10, RIO;
test %cl, %cl;
jnz __enc_xor;
write_block();
ret;
__enc_xor:
xor_block();
ret;
.align 8
.global blowfish_dec_blk
.type blowfish_dec_blk,@function;
blowfish_dec_blk:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
movq %rbp, %r11;
movq %rsi, %r10;
movq %rdx, RIO;
read_block();
round_dec(17);
round_dec(15);
round_dec(13);
round_dec(11);
round_dec(9);
round_dec(7);
round_dec(5);
round_dec(3);
add_roundkey_dec(1);
movq %r10, RIO;
write_block();
movq %r11, %rbp;
ret;
/**********************************************************************
4-way blowfish, four blocks parallel
**********************************************************************/
/* F() for 4-way. Slower when used alone/1-way, but faster when used
* parallel/4-way (tested on AMD Phenom II & Intel Xeon E7330).
*/
#define F4(x) \
movzbl x ## bh, RT1d; \
movzbl x ## bl, RT3d; \
rorq $16, x; \
movzbl x ## bh, RT0d; \
movzbl x ## bl, RT2d; \
rorq $16, x; \
movl s0(CTX,RT0,4), RT0d; \
addl s1(CTX,RT2,4), RT0d; \
xorl s2(CTX,RT1,4), RT0d; \
addl s3(CTX,RT3,4), RT0d; \
xorq RT0, x;
#define add_preloaded_roundkey4() \
xorq RKEY, RX0; \
xorq RKEY, RX1; \
xorq RKEY, RX2; \
xorq RKEY, RX3;
#define preload_roundkey_enc(n) \
movq p+4*(n)(CTX), RKEY;
#define add_roundkey_enc4(n) \
add_preloaded_roundkey4(); \
preload_roundkey_enc(n + 2);
#define round_enc4(n) \
add_roundkey_enc4(n); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3);
#define preload_roundkey_dec(n) \
movq p+4*((n)-1)(CTX), RKEY; \
rorq $32, RKEY;
#define add_roundkey_dec4(n) \
add_preloaded_roundkey4(); \
preload_roundkey_dec(n - 2);
#define round_dec4(n) \
add_roundkey_dec4(n); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3); \
\
F4(RX0); \
F4(RX1); \
F4(RX2); \
F4(RX3);
#define read_block4() \
movq (RIO), RX0; \
rorq $32, RX0; \
bswapq RX0; \
\
movq 8(RIO), RX1; \
rorq $32, RX1; \
bswapq RX1; \
\
movq 16(RIO), RX2; \
rorq $32, RX2; \
bswapq RX2; \
\
movq 24(RIO), RX3; \
rorq $32, RX3; \
bswapq RX3;
#define write_block4() \
bswapq RX0; \
movq RX0, (RIO); \
\
bswapq RX1; \
movq RX1, 8(RIO); \
\
bswapq RX2; \
movq RX2, 16(RIO); \
\
bswapq RX3; \
movq RX3, 24(RIO);
#define xor_block4() \
bswapq RX0; \
xorq RX0, (RIO); \
\
bswapq RX1; \
xorq RX1, 8(RIO); \
\
bswapq RX2; \
xorq RX2, 16(RIO); \
\
bswapq RX3; \
xorq RX3, 24(RIO);
.align 8
.global __blowfish_enc_blk_4way
.type __blowfish_enc_blk_4way,@function;
__blowfish_enc_blk_4way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
* %rcx: bool, if true: xor output
*/
pushq %rbp;
pushq %rbx;
pushq %rcx;
preload_roundkey_enc(0);
movq %rsi, %r11;
movq %rdx, RIO;
read_block4();
round_enc4(0);
round_enc4(2);
round_enc4(4);
round_enc4(6);
round_enc4(8);
round_enc4(10);
round_enc4(12);
round_enc4(14);
add_preloaded_roundkey4();
popq %rbp;
movq %r11, RIO;
test %bpl, %bpl;
jnz __enc_xor4;
write_block4();
popq %rbx;
popq %rbp;
ret;
__enc_xor4:
xor_block4();
popq %rbx;
popq %rbp;
ret;
.align 8
.global blowfish_dec_blk_4way
.type blowfish_dec_blk_4way,@function;
blowfish_dec_blk_4way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src
*/
pushq %rbp;
pushq %rbx;
preload_roundkey_dec(17);
movq %rsi, %r11;
movq %rdx, RIO;
read_block4();
round_dec4(17);
round_dec4(15);
round_dec4(13);
round_dec4(11);
round_dec4(9);
round_dec4(7);
round_dec4(5);
round_dec4(3);
add_preloaded_roundkey4();
movq %r11, RIO;
write_block4();
popq %rbx;
popq %rbp;
ret;
/*
* Glue Code for assembler optimized version of Blowfish
*
* Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
* CTR part based on code (crypto/ctr.c) by:
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <crypto/blowfish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
/* regular block cipher functions */
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
bool xor);
asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src);
/* 4-way parallel cipher functions */
asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src);
static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src)
{
__blowfish_enc_blk(ctx, dst, src, false);
}
static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst,
const u8 *src)
{
__blowfish_enc_blk(ctx, dst, src, true);
}
static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src)
{
__blowfish_enc_blk_4way(ctx, dst, src, false);
}
static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst,
const u8 *src)
{
__blowfish_enc_blk_4way(ctx, dst, src, true);
}
static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static struct crypto_alg bf_alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(bf_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = blowfish_setkey,
.cia_encrypt = blowfish_encrypt,
.cia_decrypt = blowfish_decrypt,
}
}
};
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
void (*fn)(struct bf_ctx *, u8 *, const u8 *),
void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes;
int err;
err = blkcipher_walk_virt(desc, walk);
while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
/* Process four block batch */
if (nbytes >= bsize * 4) {
do {
fn_4way(ctx, wdst, wsrc);
wsrc += bsize * 4;
wdst += bsize * 4;
nbytes -= bsize * 4;
} while (nbytes >= bsize * 4);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
fn(ctx, wdst, wsrc);
wsrc += bsize;
wdst += bsize;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
err = blkcipher_walk_done(desc, walk, nbytes);
}
return err;
}
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way);
}
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way);
}
static struct crypto_alg blk_ecb_alg = {
.cra_name = "ecb(blowfish)",
.cra_driver_name = "ecb-blowfish-asm",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = BF_MIN_KEY_SIZE,
.max_keysize = BF_MAX_KEY_SIZE,
.setkey = blowfish_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
},
};
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 *iv = (u64 *)walk->iv;
do {
*dst = *src ^ *iv;
blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
*(u64 *)walk->iv = *iv;
return nbytes;
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __cbc_encrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 ivs[4 - 1];
u64 last_iv;
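/*
 * Decryption walks from the last block back to the first so that each
 * plaintext can be xor'ed with the preceding ciphertext block even when
 * the operation is in-place (dst == src).
 */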
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
last_iv = *src;
/* Process four block batch */
if (nbytes >= bsize * 4) {
do {
nbytes -= bsize * 4 - bsize;
src -= 4 - 1;
dst -= 4 - 1;
ivs[0] = src[0];
ivs[1] = src[1];
ivs[2] = src[2];
blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src);
dst[1] ^= ivs[0];
dst[2] ^= ivs[1];
dst[3] ^= ivs[2];
nbytes -= bsize;
if (nbytes < bsize)
goto done;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
} while (nbytes >= bsize * 4);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
for (;;) {
blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src);
nbytes -= bsize;
if (nbytes < bsize)
break;
*dst ^= *(src - 1);
src -= 1;
dst -= 1;
}
done:
*dst ^= *(u64 *)walk->iv;
*(u64 *)walk->iv = last_iv;
return nbytes;
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __cbc_decrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static struct crypto_alg blk_cbc_alg = {
.cra_name = "cbc(blowfish)",
.cra_driver_name = "cbc-blowfish-asm",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = BF_MIN_KEY_SIZE,
.max_keysize = BF_MAX_KEY_SIZE,
.ivsize = BF_BLOCK_SIZE,
.setkey = blowfish_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
},
};
static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[BF_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
blowfish_enc_blk(ctx, keystream, ctrblk);
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
crypto_inc(ctrblk, BF_BLOCK_SIZE);
}
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
__be64 ctrblocks[4];
/* Process four block batch */
if (nbytes >= bsize * 4) {
do {
if (dst != src) {
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
/* create ctrblks for parallel encrypt */
ctrblocks[0] = cpu_to_be64(ctrblk++);
ctrblocks[1] = cpu_to_be64(ctrblk++);
ctrblocks[2] = cpu_to_be64(ctrblk++);
ctrblocks[3] = cpu_to_be64(ctrblk++);
blowfish_enc_blk_xor_4way(ctx, (u8 *)dst,
(u8 *)ctrblocks);
src += 4;
dst += 4;
} while ((nbytes -= bsize * 4) >= bsize * 4);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
if (dst != src)
*dst = *src;
ctrblocks[0] = cpu_to_be64(ctrblk++);
blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks);
src += 1;
dst += 1;
} while ((nbytes -= bsize) >= bsize);
done:
*(__be64 *)walk->iv = cpu_to_be64(ctrblk);
return nbytes;
}
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);
while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
nbytes = __ctr_crypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
if (walk.nbytes) {
ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
static struct crypto_alg blk_ctr_alg = {
.cra_name = "ctr(blowfish)",
.cra_driver_name = "ctr-blowfish-asm",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = BF_MIN_KEY_SIZE,
.max_keysize = BF_MAX_KEY_SIZE,
.ivsize = BF_BLOCK_SIZE,
.setkey = blowfish_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
},
},
};
static int __init init(void)
{
int err;
err = crypto_register_alg(&bf_alg);
if (err)
goto bf_err;
err = crypto_register_alg(&blk_ecb_alg);
if (err)
goto ecb_err;
err = crypto_register_alg(&blk_cbc_alg);
if (err)
goto cbc_err;
err = crypto_register_alg(&blk_ctr_alg);
if (err)
goto ctr_err;
return 0;
ctr_err:
crypto_unregister_alg(&blk_cbc_alg);
cbc_err:
crypto_unregister_alg(&blk_ecb_alg);
ecb_err:
crypto_unregister_alg(&bf_alg);
bf_err:
return err;
}
static void __exit fini(void)
{
crypto_unregister_alg(&blk_ctr_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);
crypto_unregister_alg(&bf_alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
MODULE_ALIAS("blowfish");
MODULE_ALIAS("blowfish-asm");
/*
* This is a SIMD SHA-1 implementation. It requires the Intel(R) Supplemental
* SSE3 instruction set extensions introduced in Intel Core Microarchitecture
* processors. CPUs supporting Intel(R) AVX extensions will get an additional
* boost.
*
* This work was inspired by the vectorized implementation of Dean Gaudet.
* Additional information on it can be found at:
* http://www.arctic.org/~dean/crypto/sha1.html
*
* It was improved upon with more efficient vectorization of the message
* scheduling. This implementation has also been optimized for all current and
* several future generations of Intel CPUs.
*
* See this article for more information about the implementation details:
* http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/
*
* Copyright (C) 2010, Intel Corp.
* Authors: Maxim Locktyukhin <maxim.locktyukhin@intel.com>
* Ronen Zohar <ronen.zohar@intel.com>
*
* Converted to AT&T syntax and adapted for inclusion in the Linux kernel:
* Author: Mathias Krause <minipli@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#define CTX %rdi // arg1
#define BUF %rsi // arg2
#define CNT %rdx // arg3
#define REG_A %ecx
#define REG_B %esi
#define REG_C %edi
#define REG_D %ebp
#define REG_E %edx
#define REG_T1 %eax
#define REG_T2 %ebx
#define K_BASE %r8
#define HASH_PTR %r9
#define BUFFER_PTR %r10
#define BUFFER_END %r11
#define W_TMP1 %xmm0
#define W_TMP2 %xmm9
#define W0 %xmm1
#define W4 %xmm2
#define W8 %xmm3
#define W12 %xmm4
#define W16 %xmm5
#define W20 %xmm6
#define W24 %xmm7
#define W28 %xmm8
#define XMM_SHUFB_BSWAP %xmm10
/* we keep a 64-byte window (16 pre-calculated w[i]+K values) in a circular buffer */
#define WK(t) (((t) & 15) * 4)(%rsp)
#define W_PRECALC_AHEAD 16
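A minimal model of that window (an illustrative sketch, not kernel code): 16 dword slots indexed modulo 16, filled W_PRECALC_AHEAD rounds before they are consumed.

/* illustrative model of the WK() ring buffer */
uint32_t wk[16];		/* the 64-byte stack window       */
#define WK_SLOT(t) ((t) & 15)	/* same index math as WK(t) above */
/* round t consumes wk[WK_SLOT(t)] while w[t + 16] + K is produced */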
/*
* This macro implements the SHA-1 function's body for single 64-byte block
* param: function's name
*/
.macro SHA1_VECTOR_ASM name
.global \name
.type \name, @function
.align 32
\name:
push %rbx
push %rbp
push %r12
mov %rsp, %r12
sub $64, %rsp # allocate workspace
and $~15, %rsp # align stack
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
shl $6, CNT # multiply by 64
add BUF, CNT
mov CNT, BUFFER_END
lea K_XMM_AR(%rip), K_BASE
xmm_mov BSWAP_SHUFB_CTL(%rip), XMM_SHUFB_BSWAP
SHA1_PIPELINED_MAIN_BODY
# cleanup workspace
mov $8, %ecx
mov %rsp, %rdi
xor %rax, %rax
rep stosq
mov %r12, %rsp # deallocate workspace
pop %r12
pop %rbp
pop %rbx
ret
.size \name, .-\name
.endm
/*
* This macro implements 80 rounds of SHA-1 for one 64-byte block
*/
.macro SHA1_PIPELINED_MAIN_BODY
INIT_REGALLOC
mov (HASH_PTR), A
mov 4(HASH_PTR), B
mov 8(HASH_PTR), C
mov 12(HASH_PTR), D
mov 16(HASH_PTR), E
.set i, 0
.rept W_PRECALC_AHEAD
W_PRECALC i
.set i, (i+1)
.endr
.align 4
1:
RR F1,A,B,C,D,E,0
RR F1,D,E,A,B,C,2
RR F1,B,C,D,E,A,4
RR F1,E,A,B,C,D,6
RR F1,C,D,E,A,B,8
RR F1,A,B,C,D,E,10
RR F1,D,E,A,B,C,12
RR F1,B,C,D,E,A,14
RR F1,E,A,B,C,D,16
RR F1,C,D,E,A,B,18
RR F2,A,B,C,D,E,20
RR F2,D,E,A,B,C,22
RR F2,B,C,D,E,A,24
RR F2,E,A,B,C,D,26
RR F2,C,D,E,A,B,28
RR F2,A,B,C,D,E,30
RR F2,D,E,A,B,C,32
RR F2,B,C,D,E,A,34
RR F2,E,A,B,C,D,36
RR F2,C,D,E,A,B,38
RR F3,A,B,C,D,E,40
RR F3,D,E,A,B,C,42
RR F3,B,C,D,E,A,44
RR F3,E,A,B,C,D,46
RR F3,C,D,E,A,B,48
RR F3,A,B,C,D,E,50
RR F3,D,E,A,B,C,52
RR F3,B,C,D,E,A,54
RR F3,E,A,B,C,D,56
RR F3,C,D,E,A,B,58
add $64, BUFFER_PTR # move to the next 64-byte block
cmp BUFFER_END, BUFFER_PTR # if the current is the last one use
cmovae K_BASE, BUFFER_PTR # dummy source to avoid buffer overrun
RR F4,A,B,C,D,E,60
RR F4,D,E,A,B,C,62
RR F4,B,C,D,E,A,64
RR F4,E,A,B,C,D,66
RR F4,C,D,E,A,B,68
RR F4,A,B,C,D,E,70
RR F4,D,E,A,B,C,72
RR F4,B,C,D,E,A,74
RR F4,E,A,B,C,D,76
RR F4,C,D,E,A,B,78
UPDATE_HASH (HASH_PTR), A
UPDATE_HASH 4(HASH_PTR), B
UPDATE_HASH 8(HASH_PTR), C
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
RESTORE_RENAMED_REGS
cmp K_BASE, BUFFER_PTR # K_BASE means we reached the end
jne 1b
.endm
.macro INIT_REGALLOC
.set A, REG_A
.set B, REG_B
.set C, REG_C
.set D, REG_D
.set E, REG_E
.set T1, REG_T1
.set T2, REG_T2
.endm
.macro RESTORE_RENAMED_REGS
# order is important (REG_C is where it should be)
mov B, REG_B
mov D, REG_D
mov A, REG_A
mov E, REG_E
.endm
.macro SWAP_REG_NAMES a, b
.set _T, \a
.set \a, \b
.set \b, _T
.endm
.macro F1 b, c, d
mov \c, T1
SWAP_REG_NAMES \c, T1
xor \d, T1
and \b, T1
xor \d, T1
.endm
.macro F2 b, c, d
mov \d, T1
SWAP_REG_NAMES \d, T1
xor \c, T1
xor \b, T1
.endm
.macro F3 b, c, d
mov \c, T1
SWAP_REG_NAMES \c, T1
mov \b, T2
or \b, T1
and \c, T2
and \d, T1
or T2, T1
.endm
.macro F4 b, c, d
F2 \b, \c, \d
.endm
.macro UPDATE_HASH hash, val
add \hash, \val
mov \val, \hash
.endm
/*
* RR does two rounds of SHA-1 back to back with W[] pre-calc
* t1 = F(b, c, d); e += w(i)
* e += t1; b <<= 30; d += w(i+1);
* t1 = F(a, b, c);
* d += t1; a <<= 5;
* e += a;
* t1 = e; a >>= 7;
* t1 <<= 5;
* d += t1;
*/
.macro RR F, a, b, c, d, e, round
add WK(\round), \e
\F \b, \c, \d # t1 = F(b, c, d);
W_PRECALC (\round + W_PRECALC_AHEAD)
rol $30, \b
add T1, \e
add WK(\round + 1), \d
\F \a, \b, \c
W_PRECALC (\round + W_PRECALC_AHEAD + 1)
rol $5, \a
add \a, \e
add T1, \d
ror $7, \a # (a <<r 5 >>r 7) => a <<r 30
mov \e, T1
SWAP_REG_NAMES \e, T1
rol $5, T1
add T1, \d
# write: \a, \b
# rotate: \a<=\d, \b<=\e, \c<=\a, \d<=\b, \e<=\c
.endm
.macro W_PRECALC r
.set i, \r
.if (i < 20)
.set K_XMM, 0
.elseif (i < 40)
.set K_XMM, 16
.elseif (i < 60)
.set K_XMM, 32
.elseif (i < 80)
.set K_XMM, 48
.endif
.if ((i < 16) || ((i >= 80) && (i < (80 + W_PRECALC_AHEAD))))
.set i, ((\r) % 80) # pre-compute for the next iteration
.if (i == 0)
W_PRECALC_RESET
.endif
W_PRECALC_00_15
.elseif (i < 32)
W_PRECALC_16_31
.elseif (i < 80) // rounds 32-79
W_PRECALC_32_79
.endif
.endm
.macro W_PRECALC_RESET
.set W, W0
.set W_minus_04, W4
.set W_minus_08, W8
.set W_minus_12, W12
.set W_minus_16, W16
.set W_minus_20, W20
.set W_minus_24, W24
.set W_minus_28, W28
.set W_minus_32, W
.endm
.macro W_PRECALC_ROTATE
.set W_minus_32, W_minus_28
.set W_minus_28, W_minus_24
.set W_minus_24, W_minus_20
.set W_minus_20, W_minus_16
.set W_minus_16, W_minus_12
.set W_minus_12, W_minus_08
.set W_minus_08, W_minus_04
.set W_minus_04, W
.set W, W_minus_32
.endm
.macro W_PRECALC_SSSE3
.macro W_PRECALC_00_15
W_PRECALC_00_15_SSSE3
.endm
.macro W_PRECALC_16_31
W_PRECALC_16_31_SSSE3
.endm
.macro W_PRECALC_32_79
W_PRECALC_32_79_SSSE3
.endm
/* message scheduling pre-compute for rounds 0-15 */
.macro W_PRECALC_00_15_SSSE3
.if ((i & 3) == 0)
movdqu (i*4)(BUFFER_PTR), W_TMP1
.elseif ((i & 3) == 1)
pshufb XMM_SHUFB_BSWAP, W_TMP1
movdqa W_TMP1, W
.elseif ((i & 3) == 2)
paddd (K_BASE), W_TMP1
.elseif ((i & 3) == 3)
movdqa W_TMP1, WK(i&~3)
W_PRECALC_ROTATE
.endif
.endm
/* message scheduling pre-compute for rounds 16-31
*
* - calculating last 32 w[i] values in 8 XMM registers
* - pre-calculate K+w[i] values and store to mem, for later load by ALU add
* instruction
*
* rounds 16-31 need some "heavy-lifting" vectorization due to the
* w[i]->w[i-3] dependency, but this pays off for rounds 32-79
*/
.macro W_PRECALC_16_31_SSSE3
# blended scheduling of vector and scalar instruction streams, one 4-wide
# vector iteration / 4 scalar rounds
.if ((i & 3) == 0)
movdqa W_minus_12, W
palignr $8, W_minus_16, W # w[i-14]
movdqa W_minus_04, W_TMP1
psrldq $4, W_TMP1 # w[i-3]
pxor W_minus_08, W
.elseif ((i & 3) == 1)
pxor W_minus_16, W_TMP1
pxor W_TMP1, W
movdqa W, W_TMP2
movdqa W, W_TMP1
pslldq $12, W_TMP2
.elseif ((i & 3) == 2)
psrld $31, W
pslld $1, W_TMP1
por W, W_TMP1
movdqa W_TMP2, W
psrld $30, W_TMP2
pslld $2, W
.elseif ((i & 3) == 3)
pxor W, W_TMP1
pxor W_TMP2, W_TMP1
movdqa W_TMP1, W
paddd K_XMM(K_BASE), W_TMP1
movdqa W_TMP1, WK(i&~3)
W_PRECALC_ROTATE
.endif
.endm
/* message scheduling pre-compute for rounds 32-79
*
* in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1
* instead we compute the equivalent: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
* which allows more efficient vectorization since the w[i]=>w[i-3] dependency is broken
*/
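The equivalence is easy to check empirically; a standalone sketch (not kernel code) that asserts both recurrences agree for rounds 32-79:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

int main(void)
{
	uint32_t w[80];
	int i;

	for (i = 0; i < 16; i++)
		w[i] = (uint32_t)rand();	/* arbitrary message words */
	for (i = 16; i < 80; i++)		/* SHA-1 message schedule  */
		w[i] = rol32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1);
	for (i = 32; i < 80; i++)		/* rewritten form          */
		assert(w[i] == rol32(w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32], 2));
	return 0;
}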
.macro W_PRECALC_32_79_SSSE3
.if ((i & 3) == 0)
movdqa W_minus_04, W_TMP1
pxor W_minus_28, W # W is W_minus_32 before xor
palignr $8, W_minus_08, W_TMP1
.elseif ((i & 3) == 1)
pxor W_minus_16, W
pxor W_TMP1, W
movdqa W, W_TMP1
.elseif ((i & 3) == 2)
psrld $30, W
pslld $2, W_TMP1
por W, W_TMP1
.elseif ((i & 3) == 3)
movdqa W_TMP1, W
paddd K_XMM(K_BASE), W_TMP1
movdqa W_TMP1, WK(i&~3)
W_PRECALC_ROTATE
.endif
.endm
.endm // W_PRECALC_SSSE3
#define K1 0x5a827999
#define K2 0x6ed9eba1
#define K3 0x8f1bbcdc
#define K4 0xca62c1d6
.section .rodata
.align 16
K_XMM_AR:
.long K1, K1, K1, K1
.long K2, K2, K2, K2
.long K3, K3, K3, K3
.long K4, K4, K4, K4
BSWAP_SHUFB_CTL:
.long 0x00010203
.long 0x04050607
.long 0x08090a0b
.long 0x0c0d0e0f
.section .text
W_PRECALC_SSSE3
.macro xmm_mov a, b
movdqu \a,\b
.endm
/* SSSE3 optimized implementation:
* extern "C" void sha1_transform_ssse3(u32 *digest, const char *data,
* unsigned int rounds);
*/
SHA1_VECTOR_ASM sha1_transform_ssse3
#ifdef SHA1_ENABLE_AVX_SUPPORT
.macro W_PRECALC_AVX
.purgem W_PRECALC_00_15
.macro W_PRECALC_00_15
W_PRECALC_00_15_AVX
.endm
.purgem W_PRECALC_16_31
.macro W_PRECALC_16_31
W_PRECALC_16_31_AVX
.endm
.purgem W_PRECALC_32_79
.macro W_PRECALC_32_79
W_PRECALC_32_79_AVX
.endm
.macro W_PRECALC_00_15_AVX
.if ((i & 3) == 0)
vmovdqu (i*4)(BUFFER_PTR), W_TMP1
.elseif ((i & 3) == 1)
vpshufb XMM_SHUFB_BSWAP, W_TMP1, W
.elseif ((i & 3) == 2)
vpaddd (K_BASE), W, W_TMP1
.elseif ((i & 3) == 3)
vmovdqa W_TMP1, WK(i&~3)
W_PRECALC_ROTATE
.endif
.endm
.macro W_PRECALC_16_31_AVX
.if ((i & 3) == 0)
vpalignr $8, W_minus_16, W_minus_12, W # w[i-14]
vpsrldq $4, W_minus_04, W_TMP1 # w[i-3]
vpxor W_minus_08, W, W
vpxor W_minus_16, W_TMP1, W_TMP1
.elseif ((i & 3) == 1)
vpxor W_TMP1, W, W
vpslldq $12, W, W_TMP2
vpslld $1, W, W_TMP1
.elseif ((i & 3) == 2)
vpsrld $31, W, W
vpor W, W_TMP1, W_TMP1
vpslld $2, W_TMP2, W
vpsrld $30, W_TMP2, W_TMP2
.elseif ((i & 3) == 3)
vpxor W, W_TMP1, W_TMP1
vpxor W_TMP2, W_TMP1, W
vpaddd K_XMM(K_BASE), W, W_TMP1
vmovdqu W_TMP1, WK(i&~3)
W_PRECALC_ROTATE
.endif
.endm
.macro W_PRECALC_32_79_AVX
.if ((i & 3) == 0)
vpalignr $8, W_minus_08, W_minus_04, W_TMP1
vpxor W_minus_28, W, W # W is W_minus_32 before xor
.elseif ((i & 3) == 1)
vpxor W_minus_16, W_TMP1, W_TMP1
vpxor W_TMP1, W, W
.elseif ((i & 3) == 2)
vpslld $2, W, W_TMP1
vpsrld $30, W, W
vpor W, W_TMP1, W
.elseif ((i & 3) == 3)
vpaddd K_XMM(K_BASE), W, W_TMP1
vmovdqu W_TMP1, WK(i&~3)
W_PRECALC_ROTATE
.endif
.endm
.endm // W_PRECALC_AVX
W_PRECALC_AVX
.purgem xmm_mov
.macro xmm_mov a, b
vmovdqu \a,\b
.endm
/* AVX optimized implementation:
* extern "C" void sha1_transform_avx(u32 *digest, const char *data,
* unsigned int rounds);
*/
SHA1_VECTOR_ASM sha1_transform_avx
#endif
/*
* Cryptographic API.
*
* Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
* Supplemental SSE3 instructions.
*
* This file is based on sha1_generic.c
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
unsigned int rounds);
#ifdef SHA1_ENABLE_AVX_SUPPORT
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
unsigned int rounds);
#endif
static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);
static int sha1_ssse3_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len, unsigned int partial)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int done = 0;
sctx->count += len;
if (partial) {
done = SHA1_BLOCK_SIZE - partial;
memcpy(sctx->buffer + partial, data, done);
sha1_transform_asm(sctx->state, sctx->buffer, 1);
}
if (len - done >= SHA1_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
sha1_transform_asm(sctx->state, data + done, rounds);
done += rounds * SHA1_BLOCK_SIZE;
}
memcpy(sctx->buffer, data + done, len - done);
return 0;
}
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
int res;
/* Handle the fast case right here */
if (partial + len < SHA1_BLOCK_SIZE) {
sctx->count += len;
memcpy(sctx->buffer + partial, data, len);
return 0;
}
if (!irq_fpu_usable()) {
res = crypto_sha1_update(desc, data, len);
} else {
kernel_fpu_begin();
res = __sha1_ssse3_update(desc, data, len, partial);
kernel_fpu_end();
}
return res;
}
/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
__be32 *dst = (__be32 *)out;
__be64 bits;
static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 and append length */
index = sctx->count % SHA1_BLOCK_SIZE;
padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
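/*
 * Worked example: index == 0 gives padlen == 56 (0x80 plus 55 zero
 * bytes); index == 60 gives padlen == 60, spilling into the next
 * block. Either way count + padlen ends up congruent to 56 mod 64,
 * leaving exactly 8 bytes for the big-endian bit count.
 */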
if (!irq_fpu_usable()) {
crypto_sha1_update(desc, padding, padlen);
crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
} else {
kernel_fpu_begin();
/* We need to fill a whole block for __sha1_ssse3_update() */
if (padlen <= 56) {
sctx->count += padlen;
memcpy(sctx->buffer + index, padding, padlen);
} else {
__sha1_ssse3_update(desc, padding, padlen, index);
}
__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
kernel_fpu_end();
}
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_ssse3_init,
.update = sha1_ssse3_update,
.final = sha1_ssse3_final,
.export = sha1_ssse3_export,
.import = sha1_ssse3_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ssse3",
.cra_priority = 150,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
#ifdef SHA1_ENABLE_AVX_SUPPORT
static bool __init avx_usable(void)
{
u64 xcr0;
if (!cpu_has_avx || !cpu_has_osxsave)
return false;
xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
pr_info("AVX detected but unusable.\n");
return false;
}
return true;
}
#endif
static int __init sha1_ssse3_mod_init(void)
{
/* test for SSSE3 first */
if (cpu_has_ssse3)
sha1_transform_asm = sha1_transform_ssse3;
#ifdef SHA1_ENABLE_AVX_SUPPORT
/* allow AVX to override SSSE3, it's a little faster */
if (avx_usable())
sha1_transform_asm = sha1_transform_avx;
#endif
if (sha1_transform_asm) {
pr_info("Using %s optimized SHA-1 implementation\n",
sha1_transform_asm == sha1_transform_ssse3 ? "SSSE3"
: "AVX");
return crypto_register_shash(&alg);
}
pr_info("Neither AVX nor SSSE3 is available/usable.\n");
return -ENODEV;
}
static void __exit sha1_ssse3_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
MODULE_ALIAS("sha1");
@@ -26,7 +26,7 @@
#define in_blk 12 /* input byte array address parameter*/
#define out_blk 8 /* output byte array address parameter*/
#define tfm 4 /* Twofish context structure */
#define ctx 4 /* Twofish context structure */
#define a_offset 0
#define b_offset 4
@@ -229,8 +229,8 @@ twofish_enc_blk:
push %esi
push %edi
mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base pointer to the crypto tfm */
add $crypto_tfm_ctx_offset, %ebp /* ctx address */
mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base
* pointer to the ctx address */
mov in_blk+16(%esp),%edi /* input address in edi */
mov (%edi), %eax
@@ -285,8 +285,8 @@ twofish_dec_blk:
push %edi
mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base pointer to the crypto tfm */
add $crypto_tfm_ctx_offset, %ebp /* ctx address */
mov ctx + 16(%esp), %ebp /* abuse the base pointer: set new base
* pointer to the ctx address */
mov in_blk+16(%esp),%edi /* input address in edi */
mov (%edi), %eax
......
/*
* Twofish Cipher 3-way parallel algorithm (x86_64)
*
* Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
.file "twofish-x86_64-asm-3way.S"
.text
/* structure of crypto context */
#define s0 0
#define s1 1024
#define s2 2048
#define s3 3072
#define w 4096
#define k 4128
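These offsets correspond to the Twofish context structure; a sketch below makes the arithmetic visible (the field order is an assumption inferred from the offsets, mirroring what <crypto/twofish.h> declares):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* mirror of the struct twofish_ctx layout implied by the offsets above */
struct twofish_ctx {
	uint32_t s[4][256];	/* key-dependent S-boxes, 1024 bytes each */
	uint32_t w[8];		/* input/output whitening subkeys */
	uint32_t k[32];		/* round subkeys */
};

int main(void)
{
	assert(offsetof(struct twofish_ctx, s[1]) == 1024);
	assert(offsetof(struct twofish_ctx, s[2]) == 2048);
	assert(offsetof(struct twofish_ctx, s[3]) == 3072);
	assert(offsetof(struct twofish_ctx, w) == 4096);
	assert(offsetof(struct twofish_ctx, k) == 4128);
	return 0;
}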
/**********************************************************************
3-way twofish
**********************************************************************/
#define CTX %rdi
#define RIO %rdx
#define RAB0 %rax
#define RAB1 %rbx
#define RAB2 %rcx
#define RAB0d %eax
#define RAB1d %ebx
#define RAB2d %ecx
#define RAB0bh %ah
#define RAB1bh %bh
#define RAB2bh %ch
#define RAB0bl %al
#define RAB1bl %bl
#define RAB2bl %cl
#define RCD0 %r8
#define RCD1 %r9
#define RCD2 %r10
#define RCD0d %r8d
#define RCD1d %r9d
#define RCD2d %r10d
#define RX0 %rbp
#define RX1 %r11
#define RX2 %r12
#define RX0d %ebp
#define RX1d %r11d
#define RX2d %r12d
#define RY0 %r13
#define RY1 %r14
#define RY2 %r15
#define RY0d %r13d
#define RY1d %r14d
#define RY2d %r15d
#define RT0 %rdx
#define RT1 %rsi
#define RT0d %edx
#define RT1d %esi
#define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
movzbl ab ## bl, tmp2 ## d; \
movzbl ab ## bh, tmp1 ## d; \
rorq $(rot), ab; \
op1##l T0(CTX, tmp2, 4), dst ## d; \
op2##l T1(CTX, tmp1, 4), dst ## d;
/*
* Combined G1 & G2 function. Reordered with help of rotates to have moves
* at beginning.
*/
#define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
/* G1,1 && G2,1 */ \
do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 0, ab ## 0, x ## 0); \
do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 0, ab ## 0, y ## 0); \
\
do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 1, ab ## 1, x ## 1); \
do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 1, ab ## 1, y ## 1); \
\
do16bit_ror(32, mov, xor, Tx0, Tx1, RT0, x ## 2, ab ## 2, x ## 2); \
do16bit_ror(48, mov, xor, Ty1, Ty2, RT0, y ## 2, ab ## 2, y ## 2); \
\
/* G1,2 && G2,2 */ \
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
xchgq cd ## 0, ab ## 0; \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
xchgq cd ## 1, ab ## 1; \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
xchgq cd ## 2, ab ## 2;
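The scalar functions being vectorized across three blocks here are Twofish's key-dependent g lookups; a C sketch of what g1g2_3 computes per 32-bit word (g2 is g1 applied to the input rotated left by 8 bits, matching the generic kernel implementation):

/* sketch of the per-word lookups behind g1g2_3(); s[][] are the
 * key-dependent tables at offsets s0..s3 in struct twofish_ctx */
static uint32_t g1(const struct twofish_ctx *ctx, uint32_t x)
{
	return ctx->s[0][x & 0xff] ^ ctx->s[1][(x >> 8) & 0xff] ^
	       ctx->s[2][(x >> 16) & 0xff] ^ ctx->s[3][x >> 24];
}

static uint32_t g2(const struct twofish_ctx *ctx, uint32_t x)
{
	/* equivalent to g1(ctx, rol32(x, 8)) */
	return ctx->s[1][x & 0xff] ^ ctx->s[2][(x >> 8) & 0xff] ^
	       ctx->s[3][(x >> 16) & 0xff] ^ ctx->s[0][x >> 24];
}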
#define enc_round_end(ab, x, y, n) \
addl y ## d, x ## d; \
addl x ## d, y ## d; \
addl k+4*(2*(n))(CTX), x ## d; \
xorl ab ## d, x ## d; \
addl k+4*(2*(n)+1)(CTX), y ## d; \
shrq $32, ab; \
roll $1, ab ## d; \
xorl y ## d, ab ## d; \
shlq $32, ab; \
rorl $1, x ## d; \
orq x, ab;
#define dec_round_end(ba, x, y, n) \
addl y ## d, x ## d; \
addl x ## d, y ## d; \
addl k+4*(2*(n))(CTX), x ## d; \
addl k+4*(2*(n)+1)(CTX), y ## d; \
xorl ba ## d, y ## d; \
shrq $32, ba; \
roll $1, ba ## d; \
xorl x ## d, ba ## d; \
shlq $32, ba; \
rorl $1, y ## d; \
orq y, ba;
#define encrypt_round3(ab, cd, n) \
g1g2_3(ab, cd, s0, s1, s2, s3, s0, s1, s2, s3, RX, RY); \
\
enc_round_end(ab ## 0, RX0, RY0, n); \
enc_round_end(ab ## 1, RX1, RY1, n); \
enc_round_end(ab ## 2, RX2, RY2, n);
#define decrypt_round3(ba, dc, n) \
g1g2_3(ba, dc, s1, s2, s3, s0, s3, s0, s1, s2, RY, RX); \
\
dec_round_end(ba ## 0, RX0, RY0, n); \
dec_round_end(ba ## 1, RX1, RY1, n); \
dec_round_end(ba ## 2, RX2, RY2, n);
#define encrypt_cycle3(ab, cd, n) \
encrypt_round3(ab, cd, n*2); \
encrypt_round3(ab, cd, (n*2)+1);
#define decrypt_cycle3(ba, dc, n) \
decrypt_round3(ba, dc, (n*2)+1); \
decrypt_round3(ba, dc, (n*2));
#define inpack3(in, n, xy, m) \
movq 4*(n)(in), xy ## 0; \
xorq w+4*m(CTX), xy ## 0; \
\
movq 4*(4+(n))(in), xy ## 1; \
xorq w+4*m(CTX), xy ## 1; \
\
movq 4*(8+(n))(in), xy ## 2; \
xorq w+4*m(CTX), xy ## 2;
#define outunpack3(op, out, n, xy, m) \
xorq w+4*m(CTX), xy ## 0; \
op ## q xy ## 0, 4*(n)(out); \
\
xorq w+4*m(CTX), xy ## 1; \
op ## q xy ## 1, 4*(4+(n))(out); \
\
xorq w+4*m(CTX), xy ## 2; \
op ## q xy ## 2, 4*(8+(n))(out);
#define inpack_enc3() \
inpack3(RIO, 0, RAB, 0); \
inpack3(RIO, 2, RCD, 2);
#define outunpack_enc3(op) \
outunpack3(op, RIO, 2, RAB, 6); \
outunpack3(op, RIO, 0, RCD, 4);
#define inpack_dec3() \
inpack3(RIO, 0, RAB, 4); \
rorq $32, RAB0; \
rorq $32, RAB1; \
rorq $32, RAB2; \
inpack3(RIO, 2, RCD, 6); \
rorq $32, RCD0; \
rorq $32, RCD1; \
rorq $32, RCD2;
#define outunpack_dec3() \
rorq $32, RCD0; \
rorq $32, RCD1; \
rorq $32, RCD2; \
outunpack3(mov, RIO, 0, RCD, 0); \
rorq $32, RAB0; \
rorq $32, RAB1; \
rorq $32, RAB2; \
outunpack3(mov, RIO, 2, RAB, 2);
.align 8
.global __twofish_enc_blk_3way
.type __twofish_enc_blk_3way,@function;
__twofish_enc_blk_3way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src, RIO
* %rcx: bool, if true: xor output
*/
pushq %r15;
pushq %r14;
pushq %r13;
pushq %r12;
pushq %rbp;
pushq %rbx;
pushq %rcx; /* bool xor */
pushq %rsi; /* dst */
inpack_enc3();
encrypt_cycle3(RAB, RCD, 0);
encrypt_cycle3(RAB, RCD, 1);
encrypt_cycle3(RAB, RCD, 2);
encrypt_cycle3(RAB, RCD, 3);
encrypt_cycle3(RAB, RCD, 4);
encrypt_cycle3(RAB, RCD, 5);
encrypt_cycle3(RAB, RCD, 6);
encrypt_cycle3(RAB, RCD, 7);
popq RIO; /* dst */
popq %rbp; /* bool xor */
testb %bpl, %bpl;
jnz __enc_xor3;
outunpack_enc3(mov);
popq %rbx;
popq %rbp;
popq %r12;
popq %r13;
popq %r14;
popq %r15;
ret;
__enc_xor3:
outunpack_enc3(xor);
popq %rbx;
popq %rbp;
popq %r12;
popq %r13;
popq %r14;
popq %r15;
ret;
.global twofish_dec_blk_3way
.type twofish_dec_blk_3way,@function;
twofish_dec_blk_3way:
/* input:
* %rdi: ctx, CTX
* %rsi: dst
* %rdx: src, RIO
*/
pushq %r15;
pushq %r14;
pushq %r13;
pushq %r12;
pushq %rbp;
pushq %rbx;
pushq %rsi; /* dst */
inpack_dec3();
decrypt_cycle3(RAB, RCD, 7);
decrypt_cycle3(RAB, RCD, 6);
decrypt_cycle3(RAB, RCD, 5);
decrypt_cycle3(RAB, RCD, 4);
decrypt_cycle3(RAB, RCD, 3);
decrypt_cycle3(RAB, RCD, 2);
decrypt_cycle3(RAB, RCD, 1);
decrypt_cycle3(RAB, RCD, 0);
popq RIO; /* dst */
outunpack_dec3();
popq %rbx;
popq %rbp;
popq %r12;
popq %r13;
popq %r14;
popq %r15;
ret;
@@ -221,10 +221,9 @@
twofish_enc_blk:
pushq R1
/* %rdi contains the crypto tfm address */
/* %rdi contains the ctx address */
/* %rsi contains the output address */
/* %rdx contains the input address */
add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
/* ctx address is moved to free one non-rex register
as target for the 8bit high operations */
mov %rdi, %r11
@@ -274,10 +273,9 @@ twofish_enc_blk:
twofish_dec_blk:
pushq R1
/* %rdi contains the crypto tfm address */
/* %rdi contains the ctx address */
/* %rsi contains the output address */
/* %rdx contains the input address */
add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
/* ctx address is moved to free one non-rex register
as target for the 8bit high operations */
mov %rdi, %r11
......
@@ -44,17 +44,21 @@
#include <linux/module.h>
#include <linux/types.h>
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_enc_blk);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
EXPORT_SYMBOL_GPL(twofish_dec_blk);
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(tfm, dst, src);
twofish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(tfm, dst, src);
twofish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
static struct crypto_alg alg = {
......
/*
* Glue Code for 3-way parallel assembler optimized version of Twofish
*
* Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
* CTR part based on code (crypto/ctr.c) by:
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/b128ops.h>
/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
/* 3-way parallel cipher functions */
asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, false);
}
static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, true);
}
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
void (*fn)(struct twofish_ctx *, u8 *, const u8 *),
void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *))
{
struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = TF_BLOCK_SIZE;
unsigned int nbytes;
int err;
err = blkcipher_walk_virt(desc, walk);
while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
/* Process three block batch */
if (nbytes >= bsize * 3) {
do {
fn_3way(ctx, wdst, wsrc);
wsrc += bsize * 3;
wdst += bsize * 3;
nbytes -= bsize * 3;
} while (nbytes >= bsize * 3);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
fn(ctx, wdst, wsrc);
wsrc += bsize;
wdst += bsize;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
err = blkcipher_walk_done(desc, walk, nbytes);
}
return err;
}
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way);
}
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way);
}
static struct crypto_alg blk_ecb_alg = {
.cra_name = "ecb(twofish)",
.cra_driver_name = "ecb-twofish-3way",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.setkey = twofish_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
},
};
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = TF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 *iv = (u128 *)walk->iv;
do {
u128_xor(dst, src, iv);
twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
*(u128 *)walk->iv = *iv;
return nbytes;
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __cbc_encrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = TF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 ivs[3 - 1];
u128 last_iv;
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
last_iv = *src;
/* Process three block batch */
if (nbytes >= bsize * 3) {
do {
nbytes -= bsize * (3 - 1);
src -= 3 - 1;
dst -= 3 - 1;
ivs[0] = src[0];
ivs[1] = src[1];
twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
u128_xor(dst + 1, dst + 1, ivs + 0);
u128_xor(dst + 2, dst + 2, ivs + 1);
nbytes -= bsize;
if (nbytes < bsize)
goto done;
u128_xor(dst, dst, src - 1);
src -= 1;
dst -= 1;
} while (nbytes >= bsize * 3);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
for (;;) {
twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);
nbytes -= bsize;
if (nbytes < bsize)
break;
u128_xor(dst, dst, src - 1);
src -= 1;
dst -= 1;
}
done:
u128_xor(dst, dst, (u128 *)walk->iv);
*(u128 *)walk->iv = last_iv;
return nbytes;
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
nbytes = __cbc_decrypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static struct crypto_alg blk_cbc_alg = {
.cra_name = "cbc(twofish)",
.cra_driver_name = "cbc-twofish-3way",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = twofish_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
},
};
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
dst->a = cpu_to_be64(src->a);
dst->b = cpu_to_be64(src->b);
}
static inline void be128_to_u128(u128 *dst, const be128 *src)
{
dst->a = be64_to_cpu(src->a);
dst->b = be64_to_cpu(src->b);
}
static inline void u128_inc(u128 *i)
{
i->b++;
if (!i->b)
i->a++;
}
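/*
 * u128_inc treats .a as the high and .b as the low 64 bits, e.g.
 * { .a = 0, .b = 0xffffffffffffffff } increments to { .a = 1, .b = 0 }.
 */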
static void ctr_crypt_final(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *ctrblk = walk->iv;
u8 keystream[TF_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
twofish_enc_blk(ctx, keystream, ctrblk);
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
crypto_inc(ctrblk, TF_BLOCK_SIZE);
}
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = TF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 ctrblk;
be128 ctrblocks[3];
be128_to_u128(&ctrblk, (be128 *)walk->iv);
/* Process three block batch */
if (nbytes >= bsize * 3) {
do {
if (dst != src) {
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
}
/* create ctrblks for parallel encrypt */
u128_to_be128(&ctrblocks[0], &ctrblk);
u128_inc(&ctrblk);
u128_to_be128(&ctrblocks[1], &ctrblk);
u128_inc(&ctrblk);
u128_to_be128(&ctrblocks[2], &ctrblk);
u128_inc(&ctrblk);
twofish_enc_blk_xor_3way(ctx, (u8 *)dst,
(u8 *)ctrblocks);
src += 3;
dst += 3;
nbytes -= bsize * 3;
} while (nbytes >= bsize * 3);
if (nbytes < bsize)
goto done;
}
/* Handle leftovers */
do {
if (dst != src)
*dst = *src;
u128_to_be128(&ctrblocks[0], &ctrblk);
u128_inc(&ctrblk);
twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
u128_xor(dst, dst, (u128 *)ctrblocks);
src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);
done:
u128_to_be128((be128 *)walk->iv, &ctrblk);
return nbytes;
}
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);
while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
nbytes = __ctr_crypt(desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
static struct crypto_alg blk_ctr_alg = {
.cra_name = "ctr(twofish)",
.cra_driver_name = "ctr-twofish-3way",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = twofish_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
},
},
};
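/*
 * Note the .cra_blocksize of 1 above: in CTR mode the block cipher is
 * turned into a stream cipher, so requests of any byte length are
 * valid, while the IV (the initial counter) remains TF_BLOCK_SIZE
 * bytes.  A minimal in-kernel usage sketch against the blkcipher API
 * of this era (error handling and scatterlist wiring elided):
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	tfm = crypto_alloc_blkcipher("ctr(twofish)", 0, 0);
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, TF_BLOCK_SIZE);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_encrypt(&desc, &sg_dst, &sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 */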
static int __init init(void)
{
int err;
err = crypto_register_alg(&blk_ecb_alg);
if (err)
goto ecb_err;
err = crypto_register_alg(&blk_cbc_alg);
if (err)
goto cbc_err;
err = crypto_register_alg(&blk_ctr_alg);
if (err)
goto ctr_err;
return 0;
ctr_err:
crypto_unregister_alg(&blk_cbc_alg);
cbc_err:
crypto_unregister_alg(&blk_ecb_alg);
ecb_err:
return err;
}
static void __exit fini(void)
{
crypto_unregister_alg(&blk_ctr_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
MODULE_ALIAS("twofish");
MODULE_ALIAS("twofish-asm");
......@@ -259,7 +259,9 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
......@@ -287,6 +289,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
......
......@@ -100,6 +100,14 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_PCOMP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
depends on NET
select CRYPTO_MANAGER
help
Userspace configuration for cryptographic instantiations such as
cbc(aes).
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default y
......@@ -407,6 +415,16 @@ config CRYPTO_SHA1
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
config CRYPTO_SHA1_SSSE3
tristate "SHA1 digest algorithm (SSSE3/AVX)"
depends on X86 && 64BIT
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
Extensions (AVX), when available.
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
......@@ -590,6 +608,7 @@ config CRYPTO_ARC4
config CRYPTO_BLOWFISH
tristate "Blowfish cipher algorithm"
select CRYPTO_ALGAPI
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm, by Bruce Schneier.
......@@ -600,6 +619,30 @@ config CRYPTO_BLOWFISH
See also:
<http://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_COMMON
tristate
help
Common parts of the Blowfish cipher algorithm shared by the
generic C and the assembler implementations.
See also:
<http://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
This is a variable key length cipher which can use keys from 32
bits to 448 bits in length. It's fast, simple and specifically
designed for use on "large microprocessors".
See also:
<http://www.schneier.com/blowfish.html>
config CRYPTO_CAMELLIA
tristate "Camellia cipher algorithms"
depends on CRYPTO
......@@ -793,6 +836,26 @@ config CRYPTO_TWOFISH_X86_64
See also:
<http://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
help
Twofish cipher algorithm (x86_64, 3-way parallel).
Twofish was submitted as an AES (Advanced Encryption Standard)
candidate cipher by researchers at CounterPane Systems. It is a
16 round block cipher supporting key sizes of 128, 192, and 256
bits.
This module provides a Twofish cipher implementation that processes
three blocks in parallel, making better use of the execution resources
of out-of-order CPUs.
See also:
<http://www.schneier.com/twofish.html>
comment "Compression"
config CRYPTO_DEFLATE
......
......@@ -31,6 +31,7 @@ obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
......@@ -60,7 +61,8 @@ obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
......
......@@ -23,6 +23,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/scatterwalk.h>
......@@ -381,6 +383,28 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
return 0;
}
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
alg->cra_ablkcipher.geniv ?: "<default>");
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(struct crypto_report_blkcipher), &rblkcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
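/*
 * NLA_PUT() jumps to the local nla_put_failure label when the skb runs
 * out of tailroom; each of these ->report() helpers therefore carries
 * that label and maps the failure to -EMSGSIZE.
 */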
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -403,6 +427,7 @@ const struct crypto_type crypto_ablkcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ablkcipher_show,
#endif
.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
......@@ -432,6 +457,28 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
return 0;
}
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
alg->cra_ablkcipher.geniv ?: "<built-in>");
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(struct crypto_report_blkcipher), &rblkcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -454,6 +501,7 @@ const struct crypto_type crypto_givcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_givcipher_show,
#endif
.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
......
......@@ -21,6 +21,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
......@@ -109,6 +111,28 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = &alg->cra_aead;
snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
aead->geniv ?: "<built-in>");
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
raead.ivsize = aead->ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
sizeof(struct crypto_report_aead), &raead);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -130,6 +154,7 @@ const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
.report = crypto_aead_report,
};
EXPORT_SYMBOL_GPL(crypto_aead_type);
......@@ -165,6 +190,28 @@ static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = &alg->cra_aead;
snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
raead.ivsize = aead->ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
sizeof(struct crypto_report_aead), &raead);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -186,6 +233,7 @@ const struct crypto_type crypto_nivaead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_nivaead_show,
#endif
.report = crypto_nivaead_report,
};
EXPORT_SYMBOL_GPL(crypto_nivaead_type);
......
......@@ -21,6 +21,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
......@@ -397,6 +399,24 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
return sizeof(struct crypto_shash *);
}
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -415,6 +435,7 @@ const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
.report = crypto_ahash_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
......
......@@ -22,8 +22,6 @@
#include "internal.h"
static void crypto_remove_final(struct list_head *list);
static LIST_HEAD(crypto_template_list);
void crypto_larval_error(const char *name, u32 type, u32 mask)
......@@ -129,9 +127,8 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
BUG_ON(!list_empty(&inst->alg.cra_users));
}
static void crypto_remove_spawns(struct crypto_alg *alg,
struct list_head *list,
struct crypto_alg *nalg)
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg)
{
u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
......@@ -177,6 +174,7 @@ static void crypto_remove_spawns(struct crypto_alg *alg,
crypto_remove_spawn(spawn, list);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
{
......@@ -321,7 +319,7 @@ void crypto_alg_tested(const char *name, int err)
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
static void crypto_remove_final(struct list_head *list)
void crypto_remove_final(struct list_head *list)
{
struct crypto_alg *alg;
struct crypto_alg *n;
......@@ -331,6 +329,7 @@ static void crypto_remove_final(struct list_head *list)
crypto_alg_put(alg);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
static void crypto_wait_for_test(struct crypto_larval *larval)
{
......@@ -493,6 +492,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
goto err;
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
down_write(&crypto_alg_sem);
......
......@@ -24,6 +24,8 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
......@@ -492,6 +494,28 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return crypto_init_blkcipher_ops_async(tfm);
}
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
alg->cra_blkcipher.geniv ?: "<default>");
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(struct crypto_report_blkcipher), &rblkcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -511,6 +535,7 @@ const struct crypto_type crypto_blkcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_blkcipher_show,
#endif
.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
......
/*
* Cryptographic API.
*
* Common Blowfish algorithm parts shared between the c and assembler
* implementations.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
* http://www.counterpane.com/blowfish.html
*
......@@ -22,15 +25,7 @@
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#define BF_BLOCK_SIZE 8
#define BF_MIN_KEY_SIZE 4
#define BF_MAX_KEY_SIZE 56
struct bf_ctx {
u32 p[18];
u32 s[1024];
};
#include <crypto/blowfish.h>
static const u32 bf_pbox[16 + 2] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
......@@ -309,9 +304,9 @@ static const u32 bf_sbox[256 * 4] = {
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define ROUND(a, b, n) b ^= P[n]; a ^= bf_F (b)
#define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })
/*
* The blowfish encipher processes 64-bit blocks.
......@@ -348,57 +343,10 @@ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
dst[1] = yl;
}
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
u32 in32[2], out32[2];
in32[0] = be32_to_cpu(in_blk[0]);
in32[1] = be32_to_cpu(in_blk[1]);
encrypt_block(crypto_tfm_ctx(tfm), out32, in32);
out_blk[0] = cpu_to_be32(out32[0]);
out_blk[1] = cpu_to_be32(out32[1]);
}
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
ROUND(yr, yl, 17);
ROUND(yl, yr, 16);
ROUND(yr, yl, 15);
ROUND(yl, yr, 14);
ROUND(yr, yl, 13);
ROUND(yl, yr, 12);
ROUND(yr, yl, 11);
ROUND(yl, yr, 10);
ROUND(yr, yl, 9);
ROUND(yl, yr, 8);
ROUND(yr, yl, 7);
ROUND(yl, yr, 6);
ROUND(yr, yl, 5);
ROUND(yl, yr, 4);
ROUND(yr, yl, 3);
ROUND(yl, yr, 2);
yl ^= P[1];
yr ^= P[0];
out_blk[0] = cpu_to_be32(yr);
out_blk[1] = cpu_to_be32(yl);
}
/*
* Calculates the blowfish S and P boxes for encryption and decryption.
*/
static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *P = ctx->p;
......@@ -448,35 +396,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
/* Bruce says not to bother with the weak key check. */
return 0;
}
static struct crypto_alg alg = {
.cra_name = "blowfish",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = bf_setkey,
.cia_encrypt = bf_encrypt,
.cia_decrypt = bf_decrypt } }
};
static int __init blowfish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit blowfish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
EXPORT_SYMBOL_GPL(blowfish_setkey);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
MODULE_DESCRIPTION("Blowfish Cipher common functions");
/*
* Cryptographic API.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
* http://www.counterpane.com/blowfish.html
*
* Adapted from Kerneli implementation.
*
* Copyright (c) Herbert Valerio Riedel <hvr@hvrlab.org>
* Copyright (c) Kyle McMartin <kyle@debian.org>
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
/*
* Round loop unrolling macros; S is a pointer to an S-box array
* organized as rows of four unsigned longs.
*/
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })
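/*
 * The statement-expression form keeps ROUND() a single C statement, so
 * it expands safely even in unbraced if/else bodies.
 */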
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
ROUND(yr, yl, 0);
ROUND(yl, yr, 1);
ROUND(yr, yl, 2);
ROUND(yl, yr, 3);
ROUND(yr, yl, 4);
ROUND(yl, yr, 5);
ROUND(yr, yl, 6);
ROUND(yl, yr, 7);
ROUND(yr, yl, 8);
ROUND(yl, yr, 9);
ROUND(yr, yl, 10);
ROUND(yl, yr, 11);
ROUND(yr, yl, 12);
ROUND(yl, yr, 13);
ROUND(yr, yl, 14);
ROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
out_blk[0] = cpu_to_be32(yr);
out_blk[1] = cpu_to_be32(yl);
}
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *in_blk = (const __be32 *)src;
__be32 *const out_blk = (__be32 *)dst;
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = be32_to_cpu(in_blk[0]);
u32 yr = be32_to_cpu(in_blk[1]);
ROUND(yr, yl, 17);
ROUND(yl, yr, 16);
ROUND(yr, yl, 15);
ROUND(yl, yr, 14);
ROUND(yr, yl, 13);
ROUND(yl, yr, 12);
ROUND(yr, yl, 11);
ROUND(yl, yr, 10);
ROUND(yr, yl, 9);
ROUND(yl, yr, 8);
ROUND(yr, yl, 7);
ROUND(yl, yr, 6);
ROUND(yr, yl, 5);
ROUND(yl, yr, 4);
ROUND(yr, yl, 3);
ROUND(yl, yr, 2);
yl ^= P[1];
yr ^= P[0];
out_blk[0] = cpu_to_be32(yr);
out_blk[1] = cpu_to_be32(yl);
}
static struct crypto_alg alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = blowfish_setkey,
.cia_encrypt = bf_encrypt,
.cia_decrypt = bf_decrypt } }
};
static int __init blowfish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit blowfish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
MODULE_ALIAS("blowfish");
......@@ -945,7 +945,7 @@ static void __exit cryptd_exit(void)
crypto_unregister_template(&cryptd_tmpl);
}
module_init(cryptd_init);
subsys_initcall(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
......
/*
* Crypto user configuration API.
*
* Copyright (C) 2011 secunet Security Networks AG
* Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include "internal.h"
DEFINE_MUTEX(crypto_cfg_mutex);
/* The crypto netlink socket */
static struct sock *crypto_nlsk;
struct crypto_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
u32 nlmsg_seq;
u16 nlmsg_flags;
};
static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
int match = 0;
if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
continue;
if (strlen(p->cru_driver_name))
match = !strcmp(q->cra_driver_name,
p->cru_driver_name);
else if (!exact)
match = !strcmp(q->cra_name, p->cru_name);
if (match) {
alg = q;
break;
}
}
up_read(&crypto_alg_sem);
return alg;
}
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER,
sizeof(struct crypto_report_cipher), &rcipher);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rcomp;
snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rcomp);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
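/*
 * Fill in the fields common to every algorithm, then defer to the
 * type-specific ->report() hook where one exists; only algorithms
 * without a crypto_type (plain ciphers and compressors) fall through
 * to the switch below.
 */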
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
CRYPTO_MAX_ALG_NAME);
ualg->cru_flags = alg->cra_flags;
ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority);
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL,
sizeof(struct crypto_report_larval), &rl);
goto out;
}
if (alg->cra_type && alg->cra_type->report) {
if (alg->cra_type->report(skb, alg))
goto nla_put_failure;
goto out;
}
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_COMPRESS:
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
}
out:
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int crypto_report_alg(struct crypto_alg *alg,
struct crypto_dump_info *info)
{
struct sk_buff *in_skb = info->in_skb;
struct sk_buff *skb = info->out_skb;
struct nlmsghdr *nlh;
struct crypto_user_alg *ualg;
int err = 0;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, info->nlmsg_seq,
CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
if (!nlh) {
err = -EMSGSIZE;
goto out;
}
ualg = nlmsg_data(nlh);
err = crypto_report_one(alg, ualg, skb);
if (err) {
nlmsg_cancel(skb, nlh);
goto out;
}
nlmsg_end(skb, nlh);
out:
return err;
}
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct nlattr **attrs)
{
struct crypto_user_alg *p = nlmsg_data(in_nlh);
struct crypto_alg *alg;
struct sk_buff *skb;
struct crypto_dump_info info;
int err;
if (!p->cru_driver_name[0])
return -EINVAL;
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = in_nlh->nlmsg_seq;
info.nlmsg_flags = 0;
err = crypto_report_alg(alg, &info);
if (err) {
	kfree_skb(skb);
	return err;
}
return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).pid);
}
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
struct crypto_alg *alg;
struct crypto_dump_info info;
int err;
if (cb->args[0])
goto out;
cb->args[0] = 1;
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
list_for_each_entry(alg, &crypto_alg_list, cra_list) {
err = crypto_report_alg(alg, &info);
if (err)
goto out_err;
}
out:
return skb->len;
out_err:
return err;
}
static int crypto_dump_report_done(struct netlink_callback *cb)
{
return 0;
}
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
LIST_HEAD(list);
if (priority && !strlen(p->cru_driver_name))
return -EINVAL;
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
down_write(&crypto_alg_sem);
crypto_remove_spawns(alg, &list, NULL);
if (priority)
alg->cra_priority = nla_get_u32(priority);
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
return 0;
}
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
/* We cannot unregister core algorithms such as aes-generic.
 * We would lose the reference in crypto_alg_list to such an algorithm
 * if we tried to unregister it. Unregistering an algorithm without
 * removing its module is not possible, so we restrict ourselves to
 * crypto instances that are built from templates. */
if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
return -EINVAL;
if (atomic_read(&alg->cra_refcnt) != 1)
return -EBUSY;
return crypto_unregister_alg(alg);
}
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
int exact = 0;
const char *name;
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
if (strlen(p->cru_driver_name))
exact = 1;
if (priority && !exact)
return -EINVAL;
alg = crypto_alg_match(p, exact);
if (alg)
return -EEXIST;
if (strlen(p->cru_driver_name))
name = p->cru_driver_name;
else
name = p->cru_name;
alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
down_write(&crypto_alg_sem);
if (priority)
alg->cra_priority = nla_get_u32(priority);
up_write(&crypto_alg_sem);
crypto_mod_put(alg);
return 0;
}
#define MSGSIZE(type) sizeof(struct type)
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};
static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
[CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32},
};
#undef MSGSIZE
static struct crypto_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report,
.dump = crypto_dump_report,
.done = crypto_dump_report_done},
};
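/*
 * Message dispatch: CRYPTO_MSG_GETALG with NLM_F_DUMP set streams every
 * registered algorithm via netlink_dump_start(); all other message
 * types are validated against crypto_policy and routed to ->doit().
 */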
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct nlattr *attrs[CRYPTOCFGA_MAX+1];
struct crypto_link *link;
int type, err;
type = nlh->nlmsg_type;
if (type > CRYPTO_MSG_MAX)
return -EINVAL;
type -= CRYPTO_MSG_BASE;
link = &crypto_dispatch[type];
if (security_netlink_recv(skb, CAP_NET_ADMIN))
return -EPERM;
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
if (link->dump == NULL)
return -EINVAL;
return netlink_dump_start(crypto_nlsk, skb, nlh,
link->dump, link->done, 0);
}
err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
crypto_policy);
if (err < 0)
return err;
if (link->doit == NULL)
return -EINVAL;
return link->doit(skb, nlh, attrs);
}
static void crypto_netlink_rcv(struct sk_buff *skb)
{
mutex_lock(&crypto_cfg_mutex);
netlink_rcv_skb(skb, &crypto_user_rcv_msg);
mutex_unlock(&crypto_cfg_mutex);
}
static int __init crypto_user_init(void)
{
crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO,
0, crypto_netlink_rcv,
NULL, THIS_MODULE);
if (!crypto_nlsk)
return -ENOMEM;
return 0;
}
static void __exit crypto_user_exit(void)
{
netlink_kernel_release(crypto_nlsk);
}
module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");
......@@ -86,6 +86,9 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_larval_error(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
......
......@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/compress.h>
#include <crypto/internal/compress.h>
......@@ -46,6 +48,21 @@ static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
return 0;
}
static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rpcomp;
snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(struct crypto_report_comp), &rpcomp);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -60,6 +77,7 @@ static const struct crypto_type crypto_pcomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_pcomp_show,
#endif
.report = crypto_pcomp_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_PCOMPRESS,
......
......@@ -21,6 +21,8 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng;
......@@ -58,6 +60,23 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
rrng.seedsize = alg->cra_rng.seedsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG,
sizeof(struct crypto_report_rng), &rrng);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -78,6 +97,7 @@ const struct crypto_type crypto_rng_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
.report = crypto_rng_report,
};
EXPORT_SYMBOL_GPL(crypto_rng_type);
......
......@@ -36,7 +36,7 @@ static int sha1_init(struct shash_desc *desc)
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
......@@ -71,6 +71,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
return 0;
}
EXPORT_SYMBOL(crypto_sha1_update);
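/*
 * Exported so that optimized implementations (such as the new
 * SSSE3/AVX glue) can fall back to this generic update path when the
 * FPU is not usable.
 */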
/* Add padding and return the message digest. */
......@@ -87,10 +88,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(desc, padding, padlen);
crypto_sha1_update(desc, padding, padlen);
/* Append length */
sha1_update(desc, (const u8 *)&bits, sizeof(bits));
crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
......@@ -121,7 +122,7 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.update = crypto_sha1_update,
.final = sha1_final,
.export = sha1_export,
.import = sha1_import,
......
......@@ -17,6 +17,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include "internal.h"
......@@ -522,6 +524,24 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
return alg->cra_ctxsize;
}
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
sizeof(struct crypto_report_hash), &rhash);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
......@@ -541,6 +561,7 @@ static const struct crypto_type crypto_shash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
.report = crypto_shash_report,
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
......
......@@ -782,11 +782,13 @@ static int do_test(int m)
case 7:
ret += tcrypt_test("ecb(blowfish)");
ret += tcrypt_test("cbc(blowfish)");
ret += tcrypt_test("ctr(blowfish)");
break;
case 8:
ret += tcrypt_test("ecb(twofish)");
ret += tcrypt_test("cbc(twofish)");
ret += tcrypt_test("ctr(twofish)");
break;
case 9:
......@@ -1039,6 +1041,10 @@ static int do_test(int m)
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 203:
......@@ -1050,6 +1056,10 @@ static int do_test(int m)
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 204:
......
......@@ -1755,6 +1755,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}
}, {
.alg = "ctr(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = bf_ctr_enc_tv_template,
.count = BF_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = bf_ctr_dec_tv_template,
.count = BF_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ctr(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tf_ctr_enc_tv_template,
.count = TF_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = tf_ctr_dec_tv_template,
.count = TF_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cts(cbc(aes))",
.test = alg_test_skcipher,
......
......@@ -2391,10 +2391,12 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
/*
* Blowfish test vectors.
*/
#define BF_ENC_TEST_VECTORS 6
#define BF_DEC_TEST_VECTORS 6
#define BF_CBC_ENC_TEST_VECTORS 1
#define BF_CBC_DEC_TEST_VECTORS 1
#define BF_ENC_TEST_VECTORS 7
#define BF_DEC_TEST_VECTORS 7
#define BF_CBC_ENC_TEST_VECTORS 2
#define BF_CBC_DEC_TEST_VECTORS 2
#define BF_CTR_ENC_TEST_VECTORS 2
#define BF_CTR_DEC_TEST_VECTORS 2
static struct cipher_testvec bf_enc_tv_template[] = {
{ /* DES test vectors from OpenSSL */
......@@ -2448,6 +2450,24 @@ static struct cipher_testvec bf_enc_tv_template[] = {
.ilen = 8,
.result = "\xc0\x45\x04\x01\x2e\x4e\x1f\x53",
.rlen = 8,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
.ilen = 40,
.result = "\x96\x87\x3D\x0C\x7B\xFB\xBD\x1F"
"\xE3\xC1\x99\x6D\x39\xD4\xC2\x7D"
"\xD7\x87\xA1\xF2\xDF\x51\x71\x26"
"\xC2\xF4\x6D\xFF\xF6\xCD\x6B\x40"
"\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B",
.rlen = 40,
},
};
......@@ -2503,6 +2523,24 @@ static struct cipher_testvec bf_dec_tv_template[] = {
.ilen = 8,
.result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
.rlen = 8,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.input = "\x96\x87\x3D\x0C\x7B\xFB\xBD\x1F"
"\xE3\xC1\x99\x6D\x39\xD4\xC2\x7D"
"\xD7\x87\xA1\xF2\xDF\x51\x71\x26"
"\xC2\xF4\x6D\xFF\xF6\xCD\x6B\x40"
"\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B",
.ilen = 40,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
.rlen = 40,
},
};
......@@ -2522,6 +2560,25 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
"\x58\xde\xb9\xe7\x15\x46\x16\xd9"
"\x59\xf1\x65\x2b\xd5\xff\x92\xcc",
.rlen = 32,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
.ilen = 40,
.result = "\xB4\xFE\xA5\xBB\x3D\x2C\x27\x06"
"\x06\x2B\x3A\x92\xB2\xF5\x5E\x62"
"\x84\xCD\xF7\x66\x7E\x41\x6C\x8E"
"\x1B\xD9\x02\xB6\x48\xB0\x87\x25"
"\x01\x9C\x93\x63\x51\x60\x82\xD2",
.rlen = 40,
},
};
......@@ -2541,16 +2598,125 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
"\x68\x65\x20\x74\x69\x6d\x65\x20"
"\x66\x6f\x72\x20\x00\x00\x00\x00",
.rlen = 32,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
.input = "\xB4\xFE\xA5\xBB\x3D\x2C\x27\x06"
"\x06\x2B\x3A\x92\xB2\xF5\x5E\x62"
"\x84\xCD\xF7\x66\x7E\x41\x6C\x8E"
"\x1B\xD9\x02\xB6\x48\xB0\x87\x25"
"\x01\x9C\x93\x63\x51\x60\x82\xD2",
.ilen = 40,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
.rlen = 40,
},
};
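/*
 * CTR test vectors.  The second entry in each template is deliberately
 * 43 bytes long, not a multiple of the 8-byte block size, so that the
 * partial final-block path of the CTR implementations is exercised.
 */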
static struct cipher_testvec bf_ctr_enc_tv_template[] = {
{ /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
.ilen = 40,
.result = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D"
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC",
.rlen = 40,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B",
.ilen = 43,
.result = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D"
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC"
"\x3D\xA7\xE9",
.rlen = 43,
},
};
static struct cipher_testvec bf_ctr_dec_tv_template[] = {
{ /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
.input = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D"
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC",
.ilen = 40,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
.rlen = 40,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F",
.input = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D"
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC"
"\x3D\xA7\xE9",
.ilen = 43,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B",
.rlen = 43,
},
};
/*
* Twofish test vectors.
*/
#define TF_ENC_TEST_VECTORS 3
#define TF_DEC_TEST_VECTORS 3
#define TF_CBC_ENC_TEST_VECTORS 4
#define TF_CBC_DEC_TEST_VECTORS 4
#define TF_ENC_TEST_VECTORS 4
#define TF_DEC_TEST_VECTORS 4
#define TF_CBC_ENC_TEST_VECTORS 5
#define TF_CBC_DEC_TEST_VECTORS 5
#define TF_CTR_ENC_TEST_VECTORS 2
#define TF_CTR_DEC_TEST_VECTORS 2
static struct cipher_testvec tf_enc_tv_template[] = {
{
......@@ -2582,6 +2748,30 @@ static struct cipher_testvec tf_enc_tv_template[] = {
.result = "\x37\x52\x7b\xe0\x05\x23\x34\xb8"
"\x9f\x0c\xfc\xca\xe8\x7c\xfa\x20",
.rlen = 16,
}, { /* Generated with Crypto++ */
.key = "\x3F\x85\x62\x3F\x1C\xF9\xD6\x1C"
"\xF9\xD6\xB3\x90\x6D\x4A\x90\x6D"
"\x4A\x27\x04\xE1\x27\x04\xE1\xBE"
"\x9B\x78\xBE\x9B\x78\x55\x32\x0F",
.klen = 32,
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C",
.ilen = 64,
.result = "\x88\xCB\x1E\xC2\xAF\x8A\x97\xFF"
"\xF6\x90\x46\x9C\x4A\x0F\x08\xDC"
"\xDE\xAB\xAD\xFA\xFC\xA8\xC2\x3D"
"\xE0\xE4\x8B\x3F\xD5\xA3\xF7\x14"
"\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5"
"\xDF\xFA\xC7\xE8\x09\x50\x76\x08"
"\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05"
"\x89\xF6\x82\xF0\xD3\xDB\x06\x02",
.rlen = 64,
},
};
......@@ -2615,6 +2805,30 @@ static struct cipher_testvec tf_dec_tv_template[] = {
.ilen = 16,
.result = zeroed_string,
.rlen = 16,
}, { /* Generated with Crypto++ */
.key = "\x3F\x85\x62\x3F\x1C\xF9\xD6\x1C"
"\xF9\xD6\xB3\x90\x6D\x4A\x90\x6D"
"\x4A\x27\x04\xE1\x27\x04\xE1\xBE"
"\x9B\x78\xBE\x9B\x78\x55\x32\x0F",
.klen = 32,
.input = "\x88\xCB\x1E\xC2\xAF\x8A\x97\xFF"
"\xF6\x90\x46\x9C\x4A\x0F\x08\xDC"
"\xDE\xAB\xAD\xFA\xFC\xA8\xC2\x3D"
"\xE0\xE4\x8B\x3F\xD5\xA3\xF7\x14"
"\x34\x9E\xB6\x08\xB2\xDD\xA8\xF5"
"\xDF\xFA\xC7\xE8\x09\x50\x76\x08"
"\xA2\xB6\x6A\x59\xC0\x2B\x6D\x05"
"\x89\xF6\x82\xF0\xD3\xDB\x06\x02",
.ilen = 64,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C",
.rlen = 64,
},
};
......@@ -2661,6 +2875,32 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
"\x05\xef\x8c\x61\xa8\x11\x58\x26"
"\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
.rlen = 48,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C",
.ilen = 64,
.result = "\xC8\xFF\xF2\x53\xA6\x27\x09\xD1"
"\x33\x38\xC2\xC0\x0C\x14\x7E\xB5"
"\x26\x1B\x05\x0C\x05\x12\x3F\xC0"
"\xF9\x1C\x02\x28\x40\x96\x6F\xD0"
"\x3D\x32\xDF\xDA\x56\x00\x6E\xEE"
"\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC"
"\x8C\x53\xFA\x87\x6F\xDD\x81\xA3"
"\xB1\xD3\x44\x65\xDF\xE7\x63\x38",
.rlen = 64,
},
};
......@@ -2707,6 +2947,148 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
.ilen = 48,
.result = zeroed_string,
.rlen = 48,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
.input = "\xC8\xFF\xF2\x53\xA6\x27\x09\xD1"
"\x33\x38\xC2\xC0\x0C\x14\x7E\xB5"
"\x26\x1B\x05\x0C\x05\x12\x3F\xC0"
"\xF9\x1C\x02\x28\x40\x96\x6F\xD0"
"\x3D\x32\xDF\xDA\x56\x00\x6E\xEE"
"\x5B\x2A\x72\x9D\xC2\x4D\x19\xBC"
"\x8C\x53\xFA\x87\x6F\xDD\x81\xA3"
"\xB1\xD3\x44\x65\xDF\xE7\x63\x38",
.ilen = 64,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C",
.rlen = 64,
},
};
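/*
 * As with the Blowfish CTR vectors, the 67-byte entries below are not
 * a multiple of the 16-byte Twofish block size and exercise the CTR
 * tail handling.
 */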
static struct cipher_testvec tf_ctr_enc_tv_template[] = {
{ /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C",
.ilen = 64,
.result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE"
"\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30"
"\x26\x9B\x89\xA1\xEE\x43\xE0\x52"
"\x55\x17\x4E\xC7\x0E\x33\x1F\xF1"
"\x9F\x8D\x40\x9F\x24\xFD\x92\xA0"
"\xBC\x8F\x35\xDD\x67\x38\xD8\xAA"
"\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60"
"\x01\x41\x21\x12\x38\xAB\x52\x4F",
.rlen = 64,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
.input = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
"\xC3\x37\xCE",
.ilen = 67,
.result = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE"
"\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30"
"\x26\x9B\x89\xA1\xEE\x43\xE0\x52"
"\x55\x17\x4E\xC7\x0E\x33\x1F\xF1"
"\x9F\x8D\x40\x9F\x24\xFD\x92\xA0"
"\xBC\x8F\x35\xDD\x67\x38\xD8\xAA"
"\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60"
"\x01\x41\x21\x12\x38\xAB\x52\x4F"
"\xA8\x57\x20",
.rlen = 67,
},
};
static struct cipher_testvec tf_ctr_dec_tv_template[] = {
{ /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
.input = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE"
"\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30"
"\x26\x9B\x89\xA1\xEE\x43\xE0\x52"
"\x55\x17\x4E\xC7\x0E\x33\x1F\xF1"
"\x9F\x8D\x40\x9F\x24\xFD\x92\xA0"
"\xBC\x8F\x35\xDD\x67\x38\xD8\xAA"
"\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60"
"\x01\x41\x21\x12\x38\xAB\x52\x4F",
.ilen = 64,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C",
.rlen = 64,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
"\x78\xBE\x9B\x78\x55\x32\x0F\x55",
.klen = 32,
.iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
"\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
.input = "\xDF\xDD\x69\xFA\xB0\x2E\xFD\xFE"
"\x70\x9E\xC5\x4B\xC9\xD4\xA1\x30"
"\x26\x9B\x89\xA1\xEE\x43\xE0\x52"
"\x55\x17\x4E\xC7\x0E\x33\x1F\xF1"
"\x9F\x8D\x40\x9F\x24\xFD\x92\xA0"
"\xBC\x8F\x35\xDD\x67\x38\xD8\xAA"
"\xCF\xF8\x48\xCA\xFB\xE4\x5C\x60"
"\x01\x41\x21\x12\x38\xAB\x52\x4F"
"\xA8\x57\x20",
.ilen = 67,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
"\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
"\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
"\xC3\x37\xCE",
.rlen = 67,
},
};
......
......@@ -762,11 +762,17 @@ static const u64 C7[256] = {
0x86228644a411c286ULL,
};
static const u64 rc[WHIRLPOOL_ROUNDS + 1] = {
0x0000000000000000ULL, 0x1823c6e887b8014fULL, 0x36a6d2f5796f9152ULL,
0x60bc9b8ea30c7b35ULL, 0x1de0d7c22e4bfe57ULL, 0x157737e59ff04adaULL,
0x58c9290ab1a06b85ULL, 0xbd5d10f4cb3e0567ULL, 0xe427418ba77d95d8ULL,
0xfbee7c66dd17479eULL, 0xca2dbf07ad5a8333ULL,
static const u64 rc[WHIRLPOOL_ROUNDS] = {
0x1823c6e887b8014fULL,
0x36a6d2f5796f9152ULL,
0x60bc9b8ea30c7b35ULL,
0x1de0d7c22e4bfe57ULL,
0x157737e59ff04adaULL,
0x58c9290ab1a06b85ULL,
0xbd5d10f4cb3e0567ULL,
0xe427418ba77d95d8ULL,
0xfbee7c66dd17479eULL,
0xca2dbf07ad5a8333ULL,
};
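/*
 * The round loop below now counts from 0, so rc[] no longer needs the
 * unused all-zero entry at index 0 that the old 1-based loop skipped.
 */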
/**
......@@ -793,7 +799,7 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {
state[6] = block[6] ^ (K[6] = wctx->hash[6]);
state[7] = block[7] ^ (K[7] = wctx->hash[7]);
for (r = 1; r <= WHIRLPOOL_ROUNDS; r++) {
for (r = 0; r < WHIRLPOOL_ROUNDS; r++) {
L[0] = C0[(int)(K[0] >> 56) ] ^
C1[(int)(K[7] >> 48) & 0xff] ^
......
......@@ -200,6 +200,7 @@ config CRYPTO_DEV_HIFN_795X
select CRYPTO_BLKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI
depends on !ARCH_DMA_ADDR_T_64BIT
help
This option enables support for the HIFN 795x crypto adapters.
......@@ -266,7 +267,7 @@ config CRYPTO_DEV_OMAP_AES
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
depends on ARCH_PICOXCELL
depends on ARCH_PICOXCELL && HAVE_CLK
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_ALGAPI
......
......@@ -2744,10 +2744,8 @@ static int __init hifn_init(void)
unsigned int freq;
int err;
if (sizeof(dma_addr_t) > 4) {
printk(KERN_INFO "HIFN supports only 32-bit addresses.\n");
return -EINVAL;
}
/* HIFN supports only 32-bit addresses */
BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
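	/*
	 * The new Kconfig dependency on !ARCH_DMA_ADDR_T_64BIT makes this
	 * a build-time guarantee, replacing the old runtime check.
	 */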
if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) {
......
......@@ -1006,9 +1006,9 @@ static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags);
out:
put_cpu();
out:
n2_chunk_complete(req, NULL);
return err;
}
......@@ -1096,9 +1096,9 @@ static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags);
out:
put_cpu();
out:
n2_chunk_complete(req, err ? NULL : final_iv_addr);
return err;
}
......
......@@ -508,10 +508,8 @@ static int __init padlock_init(void)
int ret;
struct cpuinfo_x86 *c = &cpu_data(0);
if (!cpu_has_xcrypt) {
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
if (!cpu_has_xcrypt)
return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) {
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
......
......@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
......@@ -1241,8 +1242,8 @@ static void spacc_spacc_complete(unsigned long data)
spin_unlock_irqrestore(&engine->hw_lock, flags);
list_for_each_entry_safe(req, tmp, &completed, list) {
req->complete(req);
list_del(&req->list);
req->complete(req);
}
}
......@@ -1657,10 +1658,33 @@ static struct spacc_alg l2_engine_algs[] = {
},
};
static int __devinit spacc_probe(struct platform_device *pdev,
unsigned max_ctxs, size_t cipher_pg_sz,
size_t hash_pg_sz, size_t fifo_sz,
struct spacc_alg *algs, size_t num_algs)
#ifdef CONFIG_OF
static const struct of_device_id spacc_of_id_table[] = {
{ .compatible = "picochip,spacc-ipsec" },
{ .compatible = "picochip,spacc-l2" },
{}
};
#else /* CONFIG_OF */
#define spacc_of_id_table NULL
#endif /* CONFIG_OF */
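/*
 * Match either the platform device id (board-file probing) or the OF
 * compatible string (devicetree probing), so one driver can serve both
 * binding styles.
 */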
static bool spacc_is_compatible(struct platform_device *pdev,
const char *spacc_type)
{
const struct platform_device_id *platid = platform_get_device_id(pdev);
if (platid && !strcmp(platid->name, spacc_type))
return true;
#ifdef CONFIG_OF
if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
return true;
#endif /* CONFIG_OF */
return false;
}
static int __devinit spacc_probe(struct platform_device *pdev)
{
int i, err, ret = -EINVAL;
struct resource *mem, *irq;
......@@ -1669,13 +1693,25 @@ static int __devinit spacc_probe(struct platform_device *pdev,
if (!engine)
return -ENOMEM;
engine->max_ctxs = max_ctxs;
engine->cipher_pg_sz = cipher_pg_sz;
engine->hash_pg_sz = hash_pg_sz;
engine->fifo_sz = fifo_sz;
engine->algs = algs;
engine->num_algs = num_algs;
engine->name = dev_name(&pdev->dev);
if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
engine->algs = ipsec_engine_algs;
engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
engine->algs = l2_engine_algs;
engine->num_algs = ARRAY_SIZE(l2_engine_algs);
} else {
return -EINVAL;
}
engine->name = dev_name(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
......@@ -1711,7 +1747,7 @@ static int __devinit spacc_probe(struct platform_device *pdev,
spin_lock_init(&engine->hw_lock);
engine->clk = clk_get(&pdev->dev, NULL);
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
......@@ -1800,72 +1836,33 @@ static int __devexit spacc_remove(struct platform_device *pdev)
return 0;
}
static int __devinit ipsec_probe(struct platform_device *pdev)
{
return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
ARRAY_SIZE(ipsec_engine_algs));
}
static struct platform_driver ipsec_driver = {
.probe = ipsec_probe,
.remove = __devexit_p(spacc_remove),
.driver = {
.name = "picoxcell-ipsec",
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
},
static const struct platform_device_id spacc_id_table[] = {
{ "picochip,spacc-ipsec", },
{ "picochip,spacc-l2", },
{ },
};
static int __devinit l2_probe(struct platform_device *pdev)
{
return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
SPACC_CRYPTO_L2_CIPHER_PG_SZ,
SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
}
static struct platform_driver l2_driver = {
.probe = l2_probe,
static struct platform_driver spacc_driver = {
.probe = spacc_probe,
.remove = __devexit_p(spacc_remove),
.driver = {
.name = "picoxcell-l2",
.name = "picochip,spacc",
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
.of_match_table = spacc_of_id_table,
},
.id_table = spacc_id_table,
};
static int __init spacc_init(void)
{
int ret = platform_driver_register(&ipsec_driver);
if (ret) {
pr_err("failed to register ipsec spacc driver");
goto out;
}
ret = platform_driver_register(&l2_driver);
if (ret) {
pr_err("failed to register l2 spacc driver");
goto l2_failed;
}
return 0;
l2_failed:
platform_driver_unregister(&ipsec_driver);
out:
return ret;
return platform_driver_register(&spacc_driver);
}
module_init(spacc_init);
static void __exit spacc_exit(void)
{
platform_driver_unregister(&ipsec_driver);
platform_driver_unregister(&l2_driver);
platform_driver_unregister(&spacc_driver);
}
module_exit(spacc_exit);
......
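With the two probe wrappers folded into one spacc driver, device-tree platforms match through spacc_of_id_table, while a non-DT board can still bind by registering a platform device whose name matches an spacc_id_table entry. A hedged board-file sketch of the latter; the register base and IRQ number below are made up, not real picoXcell values:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource spacc_resources[] = {
	{
		.start	= 0x80100000,	/* made-up register base */
		.end	= 0x8010ffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 24,		/* made-up IRQ line */
		.end	= 24,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device spacc_device = {
	.name		= "picochip,spacc-ipsec",	/* matches spacc_id_table */
	.id		= -1,
	.resource	= spacc_resources,
	.num_resources	= ARRAY_SIZE(spacc_resources),
};

static int __init board_add_spacc(void)
{
	return platform_device_register(&spacc_device);
}
device_initcall(board_add_spacc);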
......@@ -416,7 +416,7 @@ static void talitos_done(unsigned long data)
/*
* locate current (offending) descriptor
*/
static struct talitos_desc *current_desc(struct device *dev, int ch)
static u32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->chan[ch].tail;
......@@ -428,23 +428,25 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return NULL;
return 0;
}
}
return priv->chan[ch].fifo[tail].desc;
return priv->chan[ch].fifo[tail].desc->hdr;
}
/*
* user diagnostics; report root cause of error based on execution unit status
*/
static void report_eu_error(struct device *dev, int ch,
struct talitos_desc *desc)
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
switch (desc->hdr & DESC_HDR_SEL0_MASK) {
if (!desc_hdr)
desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
dev_err(dev, "AFEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_AFEUISR),
......@@ -488,7 +490,7 @@ static void report_eu_error(struct device *dev, int ch,
break;
}
switch (desc->hdr & DESC_HDR_SEL1_MASK) {
switch (desc_hdr & DESC_HDR_SEL1_MASK) {
case DESC_HDR_SEL1_MDEUA:
case DESC_HDR_SEL1_MDEUB:
dev_err(dev, "MDEUISR 0x%08x_%08x\n",
......@@ -550,7 +552,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_IEU)
dev_err(dev, "invalid execution unit error\n");
if (v_lo & TALITOS_CCPSR_LO_EU)
report_eu_error(dev, ch, current_desc(dev, ch));
report_eu_error(dev, ch, current_desc_hdr(dev, ch));
if (v_lo & TALITOS_CCPSR_LO_GB)
dev_err(dev, "gather boundary error\n");
if (v_lo & TALITOS_CCPSR_LO_GRL)
......
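Net effect of the talitos hunks: current_desc() could return NULL when the offending descriptor had already left the channel fifo, and report_eu_error() then dereferenced it. Returning the 32-bit header instead lets the not-found case yield 0, which report_eu_error() now handles by reading the header back from the channel's descriptor buffer register.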
......@@ -15,6 +15,7 @@
#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
struct module;
struct rtattr;
......@@ -26,6 +27,7 @@ struct crypto_type {
int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
unsigned int type;
......
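The new crypto_type->report hook is how each algorithm type serializes its parameters into a netlink attribute for the userspace interface. A sketch of such a hook, modeled on the cipher reporting added in this series (NLA_PUT is the attribute helper of this era and jumps to nla_put_failure when the skb runs out of room):

#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

static int crypto_cipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	snprintf(rcipher.type, sizeof(rcipher.type), "%s", "cipher");
	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	/* Append the report as a CRYPTOCFGA_REPORT_CIPHER attribute. */
	NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER,
		sizeof(struct crypto_report_cipher), &rcipher);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}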
/*
* Common values for blowfish algorithms
*/
#ifndef _CRYPTO_BLOWFISH_H
#define _CRYPTO_BLOWFISH_H
#include <linux/types.h>
#include <linux/crypto.h>
#define BF_BLOCK_SIZE 8
#define BF_MIN_KEY_SIZE 4
#define BF_MAX_KEY_SIZE 56
struct bf_ctx {
u32 p[18];
u32 s[1024];
};
int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len);
#endif
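Moving struct bf_ctx and blowfish_setkey() into a shared header lets assembler implementations reuse the generic C key schedule instead of duplicating it. A sketch of how an arch glue might pick it up; the driver name is hypothetical and the empty encrypt/decrypt stubs stand in for calls into the real .S entry points:

#include <crypto/blowfish.h>
#include <linux/crypto.h>
#include <linux/module.h>

/* Stubs standing in for the assembler fast paths; a real glue would
 * dispatch to the .S routines with crypto_tfm_ctx(tfm). */
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { }
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { }

static struct crypto_alg bf_alg = {
	.cra_name		= "blowfish",
	.cra_driver_name	= "blowfish-asm",	/* hypothetical */
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= BF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct bf_ctx),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(bf_alg.cra_list),
	.cra_u			= { .cipher = {
		.cia_min_keysize	= BF_MIN_KEY_SIZE,
		.cia_max_keysize	= BF_MAX_KEY_SIZE,
		.cia_setkey		= blowfish_setkey,	/* shared C setkey */
		.cia_encrypt		= bf_encrypt,
		.cia_decrypt		= bf_decrypt,
	} },
};

static int __init bf_init(void)
{
	return crypto_register_alg(&bf_alg);
}
module_init(bf_init);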
......@@ -82,4 +82,9 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE];
};
struct shash_desc;
extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
#endif
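crypto_sha1_update() is exported so an accelerated driver can fall back to the generic C implementation whenever SIMD state is unavailable, e.g. in interrupt context. A sketch of that pattern under stated assumptions: the FPU helpers come from x86 arch code, and the elided block processing would call a hypothetical assembly routine:

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <asm/i387.h>	/* irq_fpu_usable(), kernel_fpu_begin()/end() */

static int sha1_simd_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	/* If the FPU cannot be touched here, defer to the newly
	 * exported generic implementation. */
	if (!irq_fpu_usable())
		return crypto_sha1_update(desc, data, len);

	kernel_fpu_begin();
	/* ... feed full blocks to a hypothetical asm transform ... */
	kernel_fpu_end();

	return 0;
}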
......@@ -71,6 +71,11 @@
#define CRYPTO_ALG_TESTED 0x00000400
/*
* Set if the algorithm is an instance that is built from templates.
*/
#define CRYPTO_ALG_INSTANCE 0x00000800
/*
* Transform masks and values (for crt_flags).
*/
......
/*
* Crypto user configuration API.
*
* Copyright (C) 2011 secunet Security Networks AG
* Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Netlink configuration messages. */
enum {
CRYPTO_MSG_BASE = 0x10,
CRYPTO_MSG_NEWALG = 0x10,
CRYPTO_MSG_DELALG,
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
/* Netlink message attributes. */
enum crypto_attr_type_t {
CRYPTOCFGA_UNSPEC,
CRYPTOCFGA_PRIORITY_VAL, /* __u32 */
CRYPTOCFGA_REPORT_LARVAL, /* struct crypto_report_larval */
CRYPTOCFGA_REPORT_HASH, /* struct crypto_report_hash */
CRYPTOCFGA_REPORT_BLKCIPHER, /* struct crypto_report_blkcipher */
CRYPTOCFGA_REPORT_AEAD, /* struct crypto_report_aead */
CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
};
struct crypto_user_alg {
char cru_name[CRYPTO_MAX_ALG_NAME];
char cru_driver_name[CRYPTO_MAX_ALG_NAME];
char cru_module_name[CRYPTO_MAX_ALG_NAME];
__u32 cru_type;
__u32 cru_mask;
__u32 cru_refcnt;
__u32 cru_flags;
};
struct crypto_report_larval {
char type[CRYPTO_MAX_NAME];
};
struct crypto_report_hash {
char type[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int digestsize;
};
struct crypto_report_cipher {
char type[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
unsigned int min_keysize;
unsigned int max_keysize;
};
struct crypto_report_blkcipher {
char type[CRYPTO_MAX_NAME];
char geniv[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
};
struct crypto_report_aead {
char type[CRYPTO_MAX_NAME];
char geniv[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int maxauthsize;
unsigned int ivsize;
};
struct crypto_report_comp {
char type[CRYPTO_MAX_NAME];
};
struct crypto_report_rng {
char type[CRYPTO_MAX_NAME];
unsigned int seedsize;
};
......@@ -25,6 +25,7 @@
#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
#define NETLINK_ECRYPTFS 19
#define NETLINK_RDMA 20
#define NETLINK_CRYPTO 21 /* Crypto layer */
#define MAX_LINKS 32
......
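Taken together, the cryptouser header and the NETLINK_CRYPTO protocol id define the new userspace ABI. A minimal userspace sketch of a CRYPTO_MSG_GETALG query, assuming the new <linux/cryptouser.h> header is installed; error handling is pared down and the driver name is only an example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

int main(void)
{
	struct {
		struct nlmsghdr n;
		struct crypto_user_alg alg;
	} req;
	char buf[4096];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.n.nlmsg_type = CRYPTO_MSG_GETALG;
	req.n.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.alg.cru_driver_name, "cbc(aes-generic)",
		sizeof(req.alg.cru_driver_name) - 1);

	if (send(fd, &req, req.n.nlmsg_len, 0) < 0)
		return 1;

	/* The reply carries a crypto_user_alg followed by
	 * CRYPTOCFGA_* attributes such as CRYPTOCFGA_REPORT_BLKCIPHER. */
	len = recv(fd, buf, sizeof(buf), 0);
	printf("received %d bytes\n", len);
	close(fd);
	return 0;
}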