Commit 05249755 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (45 commits)
  crypto: caam - add support for sha512 variants of existing AEAD algorithms
  crypto: caam - remove unused authkeylen from caam_ctx
  crypto: caam - fix decryption shared vs. non-shared key setting
  crypto: caam - platform_bus_type migration
  crypto: aesni-intel - fix aesni build on i386
  crypto: aesni-intel - Merge with fpu.ko
  crypto: mv_cesa - make count_sgs() null-pointer proof
  crypto: mv_cesa - copy remaining bytes to SRAM only when needed
  crypto: mv_cesa - move digest state initialisation to a better place
  crypto: mv_cesa - fill inner/outer IV fields only in HMAC case
  crypto: mv_cesa - refactor copy_src_to_buf()
  crypto: mv_cesa - no need to save digest state after the last chunk
  crypto: mv_cesa - print a warning when registration of AES algos fail
  crypto: mv_cesa - drop this call to mv_hash_final from mv_hash_finup
  crypto: mv_cesa - the descriptor pointer register needs to be set just once
  crypto: mv_cesa - use ablkcipher_request_cast instead of the manual container_of
  crypto: caam - fix printk recursion for long error texts
  crypto: caam - remove unused keylen from session context
  hwrng: amd - enable AMD hw rnd driver for Maple PPC boards
  hwrng: amd - manage resource allocation
  ...
parents cae13fe4 4427b1b4
/*
* P4080DS Device Tree Source
*
* Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
...@@ -33,6 +33,17 @@ aliases {
dma1 = &dma1;
sdhc = &sdhc;
crypto = &crypto;
sec_jr0 = &sec_jr0;
sec_jr1 = &sec_jr1;
sec_jr2 = &sec_jr2;
sec_jr3 = &sec_jr3;
rtic_a = &rtic_a;
rtic_b = &rtic_b;
rtic_c = &rtic_c;
rtic_d = &rtic_d;
sec_mon = &sec_mon;
rio0 = &rapidio0;
};
...@@ -410,6 +421,79 @@ usb1: usb@211000 {
dr_mode = "host";
phy_type = "ulpi";
};
crypto: crypto@300000 {
compatible = "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
ranges = <0 0x300000 0x10000>;
interrupt-parent = <&mpic>;
interrupts = <92 2>;
sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupt-parent = <&mpic>;
interrupts = <88 2>;
};
sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupt-parent = <&mpic>;
interrupts = <89 2>;
};
sec_jr2: jr@3000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x3000 0x1000>;
interrupt-parent = <&mpic>;
interrupts = <90 2>;
};
sec_jr3: jr@4000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x4000 0x1000>;
interrupt-parent = <&mpic>;
interrupts = <91 2>;
};
rtic@6000 {
compatible = "fsl,sec-v4.0-rtic";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x6000 0x100>;
ranges = <0x0 0x6100 0xe00>;
rtic_a: rtic-a@0 {
compatible = "fsl,sec-v4.0-rtic-memory";
reg = <0x00 0x20 0x100 0x80>;
};
rtic_b: rtic-b@20 {
compatible = "fsl,sec-v4.0-rtic-memory";
reg = <0x20 0x20 0x200 0x80>;
};
rtic_c: rtic-c@40 {
compatible = "fsl,sec-v4.0-rtic-memory";
reg = <0x40 0x20 0x300 0x80>;
};
rtic_d: rtic-d@60 {
compatible = "fsl,sec-v4.0-rtic-memory";
reg = <0x60 0x20 0x500 0x80>;
};
};
};
sec_mon: sec_mon@314000 {
compatible = "fsl,sec-v4.0-mon";
reg = <0x314000 0x1000>;
interrupt-parent = <&mpic>;
interrupts = <93 2>;
};
};
rapidio0: rapidio@ffe0c0000 {
...
...@@ -8,3 +8,4 @@ obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
...@@ -24,13 +24,18 @@
#define CRYPT_S390_PRIORITY 300
#define CRYPT_S390_COMPOSITE_PRIORITY 400
#define CRYPT_S390_MSA 0x1
#define CRYPT_S390_MSA3 0x2
#define CRYPT_S390_MSA4 0x4
/* s390 cryptographic operations */
enum crypt_s390_operations {
CRYPT_S390_KM = 0x0100,
CRYPT_S390_KMC = 0x0200,
CRYPT_S390_KIMD = 0x0300,
CRYPT_S390_KLMD = 0x0400,
CRYPT_S390_KMAC = 0x0500,
CRYPT_S390_KMCTR = 0x0600
};
/*
...@@ -51,6 +56,10 @@ enum crypt_s390_km_func {
KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
KM_XTS_128_ENCRYPT = CRYPT_S390_KM | 0x32,
KM_XTS_128_DECRYPT = CRYPT_S390_KM | 0x32 | 0x80,
KM_XTS_256_ENCRYPT = CRYPT_S390_KM | 0x34,
KM_XTS_256_DECRYPT = CRYPT_S390_KM | 0x34 | 0x80,
};
/*
...@@ -74,6 +83,26 @@ enum crypt_s390_kmc_func {
KMC_PRNG = CRYPT_S390_KMC | 0x43,
};
/*
* function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
* instruction
*/
enum crypt_s390_kmctr_func {
KMCTR_QUERY = CRYPT_S390_KMCTR | 0x0,
KMCTR_DEA_ENCRYPT = CRYPT_S390_KMCTR | 0x1,
KMCTR_DEA_DECRYPT = CRYPT_S390_KMCTR | 0x1 | 0x80,
KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
KMCTR_AES_128_ENCRYPT = CRYPT_S390_KMCTR | 0x12,
KMCTR_AES_128_DECRYPT = CRYPT_S390_KMCTR | 0x12 | 0x80,
KMCTR_AES_192_ENCRYPT = CRYPT_S390_KMCTR | 0x13,
KMCTR_AES_192_DECRYPT = CRYPT_S390_KMCTR | 0x13 | 0x80,
KMCTR_AES_256_ENCRYPT = CRYPT_S390_KMCTR | 0x14,
KMCTR_AES_256_DECRYPT = CRYPT_S390_KMCTR | 0x14 | 0x80,
};
/*
* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
* instruction
...@@ -83,6 +112,7 @@ enum crypt_s390_kimd_func {
KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
KIMD_GHASH = CRYPT_S390_KIMD | 65,
};
/*
...@@ -283,6 +313,45 @@ static inline int crypt_s390_kmac(long func, void *param,
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_kmctr:
* @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
* @param: address of parameter block; see POP for details on each func
* @dest: address of destination memory area
* @src: address of source memory area
* @src_len: length of src operand in bytes
* @counter: address of counter value
*
* Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
*
* Returns -1 for failure, 0 for the query func, number of processed
* bytes for encryption/decryption funcs
*/
static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
const u8 *src, long src_len, u8 *counter)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void *__param asm("1") = param;
register const u8 *__src asm("2") = src;
register long __src_len asm("3") = src_len;
register u8 *__dest asm("4") = dest;
register u8 *__ctr asm("6") = counter;
int ret = -1;
asm volatile(
"0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
"1: brc 1,0b \n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
"+a" (__ctr)
: "d" (__func), "a" (__param) : "cc", "memory");
if (ret < 0)
return ret;
return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
* crypt_s390_func_available:
* @func: the function code of the specific function; 0 if op in general
...@@ -291,13 +360,17 @@ static inline int crypt_s390_kmac(long func, void *param,
*
* Returns 1 if func available; 0 if func or op in general not available
*/
static inline int crypt_s390_func_available(int func,
unsigned int facility_mask)
{
unsigned char status[16];
int ret;
if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
return 0;
if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
return 0;
if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
return 0;
switch (func & CRYPT_S390_OP_MASK) {
...@@ -316,6 +389,10 @@ static inline int crypt_s390_func_available(int func)
case CRYPT_S390_KMAC:
ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
case CRYPT_S390_KMCTR:
ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
NULL);
break;
default:
return 0;
}
...@@ -326,4 +403,31 @@ static inline int crypt_s390_func_available(int func)
return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}
/**
* crypt_s390_pcc:
* @func: the function code passed to KM; see crypt_s390_km_func
* @param: address of parameter block; see POP for details on each func
*
* Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
*
* Returns -1 for failure, 0 for success.
*/
static inline int crypt_s390_pcc(long func, void *param)
{
register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
register void *__param asm("1") = param;
int ret = -1;
asm volatile(
"0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */
"1: brc 1,0b \n" /* handle partial completion */
" la %0,0\n"
"2:\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "+d" (ret)
: "d" (__func), "a" (__param) : "cc", "memory");
return ret;
}
#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
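To make the shape of the new interface concrete, here is a minimal sketch of a caller that gates itself on the MSA-4 facility and then pushes one 16-byte block through KMCTR-AES-128; the function and buffer names (ctr_aes_demo, pblock) are illustrative only and do not appear in this patch:

static int ctr_aes_demo(const u8 *key, const u8 *in, u8 *out, u8 *ctrblk)
{
	u8 pblock[16];	/* KMCTR-AES-128 parameter block holds the key */
	int ret;

	if (!crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
				       CRYPT_S390_MSA4))
		return -EOPNOTSUPP;

	memcpy(pblock, key, 16);
	ret = crypt_s390_kmctr(KMCTR_AES_128_ENCRYPT, pblock, out,
			       in, 16, ctrblk);
	return ret == 16 ? 0 : -EIO;	/* wrapper returns bytes processed */
}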
/*
* Cryptographic API.
*
* Function for checking keys for the DES and Triple DES Encryption
* algorithms.
*
* Originally released as descore by Dana L. How <how@isl.stanford.edu>.
* Modified by Raimar Falke <rf13@inf.tu-dresden.de> for the Linux-Kernel.
* Derived from Cryptoapi and Nettle implementations, adapted for in-place
* scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL.
*
* s390 Version:
* Copyright IBM Corp. 2003
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
* Derived from "crypto/des.c"
* Copyright (c) 1992 Dana L. How.
* Copyright (c) Raimar Falke <rf13@inf.tu-dresden.de>
* Copyright (c) Gisle Sælensminde <gisle@ii.uib.no>
* Copyright (C) 2001 Niels Möller.
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include "crypto_des.h"
#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o))
static const u8 parity[] = {
8,1,0,8,0,8,8,0,0,8,8,0,8,0,2,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,3,
0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,
8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
8,0,0,8,0,8,8,0,0,8,8,0,8,0,0,8,0,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,
4,8,8,0,8,0,0,8,8,0,0,8,0,8,8,0,8,5,0,8,0,8,8,0,0,8,8,0,8,0,6,8,
};
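/*
 * Note (added for clarity): each key byte indexes parity[] to get a
 * 4-bit code: 0 for an odd-parity byte that cannot occur in a weak
 * key, 8 for an even-parity byte, and 1-6 for the six byte values
 * (01, 0E, 1F, E0, F1, FE) from which the DES weak and semi-weak
 * keys are built.  Packing the eight codes into the u32 'n' below
 * reduces the weak-key check to a handful of integer comparisons.
 */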
/*
* RFC2451: Weak key checks SHOULD be performed.
*/
int
crypto_des_check_key(const u8 *key, unsigned int keylen, u32 *flags)
{
u32 n, w;
n = parity[key[0]]; n <<= 4;
n |= parity[key[1]]; n <<= 4;
n |= parity[key[2]]; n <<= 4;
n |= parity[key[3]]; n <<= 4;
n |= parity[key[4]]; n <<= 4;
n |= parity[key[5]]; n <<= 4;
n |= parity[key[6]]; n <<= 4;
n |= parity[key[7]];
w = 0x88888888L;
if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY)
&& !((n - (w >> 3)) & w)) { /* 1 in 10^10 keys passes this test */
if (n < 0x41415151) {
if (n < 0x31312121) {
if (n < 0x14141515) {
/* 01 01 01 01 01 01 01 01 */
if (n == 0x11111111) goto weak;
/* 01 1F 01 1F 01 0E 01 0E */
if (n == 0x13131212) goto weak;
} else {
/* 01 E0 01 E0 01 F1 01 F1 */
if (n == 0x14141515) goto weak;
/* 01 FE 01 FE 01 FE 01 FE */
if (n == 0x16161616) goto weak;
}
} else {
if (n < 0x34342525) {
/* 1F 01 1F 01 0E 01 0E 01 */
if (n == 0x31312121) goto weak;
/* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
if (n == 0x33332222) goto weak;
} else {
/* 1F E0 1F E0 0E F1 0E F1 */
if (n == 0x34342525) goto weak;
/* 1F FE 1F FE 0E FE 0E FE */
if (n == 0x36362626) goto weak;
}
}
} else {
if (n < 0x61616161) {
if (n < 0x44445555) {
/* E0 01 E0 01 F1 01 F1 01 */
if (n == 0x41415151) goto weak;
/* E0 1F E0 1F F1 0E F1 0E */
if (n == 0x43435252) goto weak;
} else {
/* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
if (n == 0x44445555) goto weak;
/* E0 FE E0 FE F1 FE F1 FE */
if (n == 0x46465656) goto weak;
}
} else {
if (n < 0x64646565) {
/* FE 01 FE 01 FE 01 FE 01 */
if (n == 0x61616161) goto weak;
/* FE 1F FE 1F FE 0E FE 0E */
if (n == 0x63636262) goto weak;
} else {
/* FE E0 FE E0 FE F1 FE F1 */
if (n == 0x64646565) goto weak;
/* FE FE FE FE FE FE FE FE */
if (n == 0x66666666) goto weak;
}
}
}
}
return 0;
weak:
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
EXPORT_SYMBOL(crypto_des_check_key);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Key Check function for DES & DES3 Cipher Algorithms");
/*
* Cryptographic API.
*
* s390 implementation of the GHASH algorithm for GCM (Galois/Counter Mode).
*
* Copyright IBM Corp. 2011
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include "crypt_s390.h"
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
struct ghash_ctx {
u8 icv[16];
u8 key[16];
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
return 0;
}
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
unsigned int n;
u8 *buf = dctx->buffer;
int ret;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
n = min(srclen, dctx->bytes);
dctx->bytes -= n;
srclen -= n;
memcpy(pos, src, n);
src += n;
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
GHASH_BLOCK_SIZE);
BUG_ON(ret != GHASH_BLOCK_SIZE);
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
BUG_ON(ret != n);
src += n;
srclen -= n;
}
if (srclen) {
dctx->bytes = GHASH_BLOCK_SIZE - srclen;
memcpy(buf, src, srclen);
}
return 0;
}
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
BUG_ON(ret != GHASH_BLOCK_SIZE);
}
dctx->bytes = 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
ghash_flush(ctx, dctx);
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
return 0;
}
static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
.final = ghash_final,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
},
};
static int __init ghash_mod_init(void)
{
if (!crypt_s390_func_available(KIMD_GHASH,
CRYPT_S390_MSA | CRYPT_S390_MSA4))
return -EOPNOTSUPP;
return crypto_register_shash(&ghash_alg);
}
static void __exit ghash_mod_exit(void)
{
crypto_unregister_shash(&ghash_alg);
}
module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_ALIAS("ghash");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
...@@ -166,7 +166,7 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
return -EOPNOTSUPP;
if (prng_chunk_size < 8)
...
...@@ -90,7 +90,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
...
...@@ -86,7 +86,7 @@ static struct shash_alg alg = {
static int sha256_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
...
...@@ -132,7 +132,7 @@ static int __init init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
...
...@@ -2,8 +2,6 @@
# Arch-specific CryptoAPI modules.
#
obj-$(CONFIG_CRYPTO_FPU) += fpu.o
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
...@@ -24,6 +22,6 @@ aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
...@@ -94,6 +94,10 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
...@@ -1257,6 +1261,8 @@ static int __init aesni_init(void)
return -ENODEV;
}
if ((err = crypto_fpu_init()))
goto fpu_err;
if ((err = crypto_register_alg(&aesni_alg)))
goto aes_err;
if ((err = crypto_register_alg(&__aesni_alg)))
...@@ -1334,6 +1340,7 @@ static int __init aesni_init(void)
__aes_err:
crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
return err;
}
...@@ -1363,6 +1370,8 @@ static void __exit aesni_exit(void)
crypto_unregister_alg(&blk_ecb_alg);
crypto_unregister_alg(&__aesni_alg);
crypto_unregister_alg(&aesni_alg);
crypto_fpu_exit();
}
module_init(aesni_init);
...
...@@ -150,18 +150,12 @@ static struct crypto_template crypto_fpu_tmpl = {
.module = THIS_MODULE,
};
int __init crypto_fpu_init(void)
{
return crypto_register_template(&crypto_fpu_tmpl);
}
void __exit crypto_fpu_exit(void)
{
crypto_unregister_template(&crypto_fpu_tmpl);
}
module_init(crypto_fpu_module_init);
module_exit(crypto_fpu_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FPU block cipher wrapper");
...@@ -264,11 +264,6 @@ config CRYPTO_XTS
key size 256, 384 or 512 bits. This implementation currently
can't handle a sectorsize which is not a multiple of 16 bytes.
config CRYPTO_FPU
tristate
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
comment "Hash modes" comment "Hash modes"
config CRYPTO_HMAC config CRYPTO_HMAC
...@@ -543,7 +538,6 @@ config CRYPTO_AES_NI_INTEL ...@@ -543,7 +538,6 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_AES_586 if !64BIT select CRYPTO_AES_586 if !64BIT
select CRYPTO_CRYPTD select CRYPTO_CRYPTD
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
select CRYPTO_FPU
help
Use Intel AES-NI instructions for AES algorithm.
...
...@@ -1009,6 +1009,10 @@ static int do_test(int m)
speed_template_32_48_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 201:
...
...@@ -2218,6 +2218,22 @@ static const struct alg_test_desc alg_test_descs[] = {
.count = MICHAEL_MIC_TEST_VECTORS
}
}
}, {
.alg = "ofb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = aes_ofb_enc_tv_template,
.count = AES_OFB_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ofb_dec_tv_template,
.count = AES_OFB_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "pcbc(fcrypt)",
.test = alg_test_skcipher,
...
...@@ -2980,6 +2980,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
#define AES_XTS_DEC_TEST_VECTORS 4
#define AES_CTR_ENC_TEST_VECTORS 3
#define AES_CTR_DEC_TEST_VECTORS 3
#define AES_OFB_ENC_TEST_VECTORS 1
#define AES_OFB_DEC_TEST_VECTORS 1
#define AES_CTR_3686_ENC_TEST_VECTORS 7
#define AES_CTR_3686_DEC_TEST_VECTORS 6
#define AES_GCM_ENC_TEST_VECTORS 9
...@@ -5506,6 +5508,64 @@ static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
},
};
static struct cipher_testvec aes_ofb_enc_tv_template[] = {
/* From NIST Special Publication 800-38A, Appendix F.5 */
{
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.klen = 16,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.ilen = 64,
.result = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
"\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
"\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5"
"\x3c\x52\xda\xc5\x4e\xd8\x25"
"\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43"
"\x44\xf7\xa8\x22\x60\xed\xcc"
"\x30\x4c\x65\x28\xf6\x59\xc7\x78"
"\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
.rlen = 64,
}
};
static struct cipher_testvec aes_ofb_dec_tv_template[] = {
/* From NIST Special Publication 800-38A, Appendix F.5 */
{
.key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
"\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
.klen = 16,
.iv = "\x00\x01\x02\x03\x04\x05\x06\x07\x08"
"\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.input = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
"\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
"\x77\x89\x50\x8d\x16\x91\x8f\x03\xf5"
"\x3c\x52\xda\xc5\x4e\xd8\x25"
"\x97\x40\x05\x1e\x9c\x5f\xec\xf6\x43"
"\x44\xf7\xa8\x22\x60\xed\xcc"
"\x30\x4c\x65\x28\xf6\x59\xc7\x78"
"\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
.ilen = 64,
.result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
"\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
"\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
"\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
"\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
"\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.rlen = 64,
}
};
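The two templates mirror each other because OFB encryption and decryption are the same operation: the block cipher only generates a keystream, which is XORed with the data in both directions. A generic sketch of the mode (block_fn stands in for the AES block transform; this helper is illustrative, not part of the patch):

static void ofb_crypt(void (*block_fn)(const u8 *in, u8 *out),
		      u8 *iv, const u8 *in, u8 *out, int nblocks)
{
	int i, n;

	for (n = 0; n < nblocks; n++) {
		block_fn(iv, iv);	/* iv becomes the next keystream block */
		for (i = 0; i < 16; i++)	/* 16 = AES block size */
			out[n * 16 + i] = in[n * 16 + i] ^ iv[i];
	}
}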
static struct aead_testvec aes_gcm_enc_tv_template[] = {
{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
.key = zeroed_string,
...
...@@ -49,7 +49,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
depends on HW_RANDOM && (X86 || PPC_MAPLE) && PCI
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
...
...@@ -133,6 +133,12 @@ static int __init mod_init(void)
pmbase &= 0x0000FF00;
if (pmbase == 0)
goto out;
if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
pmbase + 0xF0);
err = -EBUSY;
goto out;
}
amd_rng.priv = (unsigned long)pmbase;
amd_pdev = pdev;
...@@ -141,6 +147,7 @@ static int __init mod_init(void)
if (err) {
printk(KERN_ERR PFX "RNG registering failed (%d)\n",
err);
release_region(pmbase + 0xF0, 8);
goto out;
}
out:
...@@ -149,6 +156,8 @@ static int __init mod_init(void)
static void __exit mod_exit(void)
{
u32 pmbase = (unsigned long)amd_rng.priv;
release_region(pmbase + 0xF0, 8);
hwrng_unregister(&amd_rng);
}
...
...@@ -91,6 +91,8 @@ config CRYPTO_SHA1_S390
This is the s390 hardware accelerated implementation of the
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
It is available as of z990.
config CRYPTO_SHA256_S390
tristate "SHA256 digest algorithm"
depends on S390
...@@ -99,8 +101,7 @@ config CRYPTO_SHA256_S390
This is the s390 hardware accelerated implementation of the
SHA256 secure hash standard (DFIPS 180-2).
It is available as of z9.
config CRYPTO_SHA512_S390
tristate "SHA384 and SHA512 digest algorithm"
...@@ -110,10 +111,7 @@ config CRYPTO_SHA512_S390
This is the s390 hardware accelerated implementation of the
SHA512 secure hash standard.
It is available as of z10.
config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms"
...@@ -121,9 +119,12 @@ config CRYPTO_DES_S390
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
This is the s390 hardware accelerated implementation of the
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
As of z990 the ECB and CBC mode are hardware accelerated.
As of z196 the CTR mode is hardware accelerated.
config CRYPTO_AES_S390
tristate "AES cipher algorithms"
depends on S390
...@@ -131,20 +132,15 @@ config CRYPTO_AES_S390
select CRYPTO_BLKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197).
Rijndael appears to be consistently a very good performer in
both hardware and software across a wide range of computing
environments regardless of its use in feedback or non-feedback
modes. Its key setup time is excellent, and its key agility is
good. Rijndael's very low memory requirements make it very well
suited for restricted-space environments, in which it also
demonstrates excellent performance. Rijndael's operations are
among the easiest to defend against power and timing attacks.
As of z9 the ECB and CBC modes are hardware accelerated
for 128 bit keys.
As of z10 the ECB and CBC modes are hardware accelerated
for all AES key sizes.
As of z196 the CTR mode is hardware accelerated for all AES
key sizes and XTS mode is hardware accelerated for 256 and
512 bit keys.
config S390_PRNG
tristate "Pseudo random number generator device driver"
...@@ -154,8 +150,20 @@ config S390_PRNG
Select this option if you want to use the s390 pseudo random number
generator. The PRNG is part of the cryptographic processor functions
and uses triple-DES to generate secure random numbers like the
ANSI X9.17 standard. User-space programs access the
pseudo-random-number device through the char device /dev/prandom.
It is available as of z9.
config CRYPTO_GHASH_S390
tristate "GHASH digest algorithm"
depends on S390
select CRYPTO_HASH
help
This is the s390 hardware accelerated implementation of the
GHASH message digest algorithm for GCM (Galois/Counter Mode).
It is available as of z196.
config CRYPTO_DEV_MV_CESA
tristate "Marvell's Cryptographic Engine"
...@@ -200,6 +208,8 @@ config CRYPTO_DEV_HIFN_795X_RNG
Select this option if you want to enable the random number generator
on the HIFN 795x crypto adapters.
source drivers/crypto/caam/Kconfig
config CRYPTO_DEV_TALITOS
tristate "Talitos Freescale Security Engine (SEC)"
select CRYPTO_ALGAPI
...@@ -269,4 +279,15 @@ config CRYPTO_DEV_PICOXCELL
Saying m here will build a module named picoxcell_crypto.
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
select CRYPTO_AES
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
This option enables support for the S5P crypto accelerator.
Select this to offload AES algorithm execution from the CPU
to the Samsung S5PV210 or S5PC110 hardware.
endif # CRYPTO_HW
...@@ -6,8 +6,10 @@ n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
depends on FSL_SOC
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
This module adds a job ring operation interface, and configures h/w
to operate as a DPAA component automatically, depending
on h/w feature availability.
To compile this driver as a module, choose M here: the module
will be called caam.
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
depends on CRYPTO_DEV_FSL_CAAM
range 2 9
default "9"
help
Select size of Job Rings as a power of 2, within the
range 2-9 (ring size 4-512).
Examples:
2 => 4
3 => 8
4 => 16
5 => 32
6 => 64
7 => 128
8 => 256
9 => 512
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enable the Job Ring's interrupt coalescing feature.
config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
int "Job Ring interrupt coalescing count threshold"
depends on CRYPTO_DEV_FSL_CAAM_INTC
range 1 255
default 255
help
Select number of descriptor completions to queue before
raising an interrupt, in the range 1-255. Note that a selection
of 1 functionally defeats the coalescing feature, and a selection
equal to or greater than the job ring size will force timeouts.
config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
int "Job Ring interrupt coalescing timer threshold"
depends on CRYPTO_DEV_FSL_CAAM_INTC
range 1 65535
default 2048
help
Select the timeout, in units of 64 bus clocks, after which an
interrupt is raised if one or more descriptor completions are
queued without reaching the count threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
depends on CRYPTO_DEV_FSL_CAAM
default y
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the Linux native IPsec
stack) to the SEC4 via job ring.
To compile this as a module, choose M here: the module
will be called caamalg.
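As a worked example of how the two coalescing thresholds above interact (assuming, purely for illustration, a 400 MHz platform bus): the default CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD of 2048 corresponds to 2048 * 64 = 131072 bus clocks, i.e. roughly 0.33 ms, so an interrupt fires either after 255 completed descriptors (the default count threshold) or roughly 0.33 ms after the first queued completion, whichever comes first.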
#
# Makefile for the CAAM backend and dependent components
#
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
caam-objs := ctrl.o jr.o error.o
/*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#ifndef CAAM_COMPAT_H
#define CAAM_COMPAT_H
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#endif /* !defined(CAAM_COMPAT_H) */
/*
* CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
static int caam_remove(struct platform_device *pdev)
{
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_drv_private_jr *jrpriv;
struct caam_full __iomem *topregs;
int ring, ret = 0;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
/* shut down JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
irq_dispose_mapping(jrpriv->irq);
}
/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
/* Unmap controller region */
iounmap(&topregs->ctrl);
kfree(ctrlpriv->jrdev);
kfree(ctrlpriv);
return ret;
}
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
int d, ring, rspec;
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
struct caam_perfmon *perfmon;
struct caam_deco **deco;
u32 deconum;
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
if (!ctrlpriv)
return -ENOMEM;
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
if (ctrl == NULL) {
dev_err(dev, "caam: of_iomap() failed\n");
return -ENOMEM;
}
ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
/* topregs used to derive pointers to CAAM sub-blocks only */
topregs = (struct caam_full __iomem *)ctrl;
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* 36-bit pointers in master configuration register
*/
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
if (sizeof(dma_addr_t) == sizeof(u64))
dma_set_mask(dev, DMA_BIT_MASK(36));
/* Find out how many DECOs are present */
deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
GFP_KERNEL);
deco = (struct caam_deco __force **)&topregs->deco;
for (d = 0; d < deconum; d++)
ctrlpriv->deco[d] = deco[d];
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
* for all, then go probe each one.
*/
rspec = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
rspec++;
ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
if (ctrlpriv->jrdev == NULL) {
iounmap(&topregs->ctrl);
return -ENOMEM;
}
ring = 0;
ctrlpriv->total_jobrs = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
caam_jr_probe(pdev, np, ring);
ctrlpriv->total_jobrs++;
ring++;
}
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
CTPR_QI_MASK);
if (ctrlpriv->qi_present) {
ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
/* This is all that's required to physically enable QI */
wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
}
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
caam_remove(pdev);
return -ENOMEM;
}
/* NOTE: RTIC detection ought to go here, around Si time */
/* Initialize queue allocator lock */
spin_lock_init(&ctrlpriv->jr_alloc_lock);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx\n",
rd_reg64(&topregs->ctrl.perfmon.caam_id));
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_u64("rq_dequeued",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->req_dequeued);
ctrlpriv->ctl_ob_enc_req =
debugfs_create_u64("ob_rq_encrypted",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_req);
ctrlpriv->ctl_ib_dec_req =
debugfs_create_u64("ib_rq_decrypted",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_req);
ctrlpriv->ctl_ob_enc_bytes =
debugfs_create_u64("ob_bytes_encrypted",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_bytes);
ctrlpriv->ctl_ob_prot_bytes =
debugfs_create_u64("ob_bytes_protected",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_prot_bytes);
ctrlpriv->ctl_ib_dec_bytes =
debugfs_create_u64("ib_bytes_decrypted",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_bytes);
ctrlpriv->ctl_ib_valid_bytes =
debugfs_create_u64("ib_bytes_validated",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_valid_bytes);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
debugfs_create_u64("fault_addr",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultaddr);
ctrlpriv->ctl_faultdetail =
debugfs_create_u32("fault_detail",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultdetail);
ctrlpriv->ctl_faultstatus =
debugfs_create_u32("fault_status",
S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->status);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IFCHR | S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IFCHR | S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IFCHR | S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
}
static struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.owner = THIS_MODULE,
.of_match_table = caam_match,
},
.probe = caam_probe,
.remove = __devexit_p(caam_remove),
};
static int __init caam_base_init(void)
{
return platform_driver_register(&caam_driver);
}
static void __exit caam_base_exit(void)
{
return platform_driver_unregister(&caam_driver);
}
module_init(caam_base_init);
module_exit(caam_base_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
/*
* caam descriptor construction helper functions
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#include "desc.h"
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
#define CAAM_PTR_SZ sizeof(dma_addr_t)
#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
&__func__[sizeof("append")]); } while (0)
#else
#define PRINT_POS
#endif
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
static inline int desc_len(u32 *desc)
{
return *desc & HDR_DESCLEN_MASK;
}
static inline int desc_bytes(void *desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
static inline u32 *desc_end(u32 *desc)
{
return desc + desc_len(desc);
}
static inline void *sh_desc_pdb(u32 *desc)
{
return desc + 1;
}
static inline void init_desc(u32 *desc, u32 options)
{
*desc = options | HDR_ONE | 1;
}
static inline void init_sh_desc(u32 *desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
{
u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
options);
}
static inline void init_job_desc(u32 *desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
static inline void append_ptr(u32 *desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
*offset = ptr;
(*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
}
static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
(len << HDR_START_IDX_SHIFT));
append_ptr(desc, ptr);
}
static inline void append_data(u32 *desc, void *data, int len)
{
u32 *offset = desc_end(desc);
if (len) /* avoid sparse warning: memcpy with byte count of 0 */
memcpy(offset, data, len);
(*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
}
static inline void append_cmd(u32 *desc, u32 command)
{
u32 *cmd = desc_end(desc);
*cmd = command;
(*desc)++;
}
static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
append_ptr(desc, ptr);
}
static inline void append_cmd_data(u32 *desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
append_data(desc, data, len);
}
static inline u32 *append_jump(u32 *desc, u32 options)
{
u32 *cmd = desc_end(desc);
PRINT_POS;
append_cmd(desc, CMD_JUMP | options);
return cmd;
}
static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
}
#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
}
APPEND_CMD(operation, OPERATION)
APPEND_CMD(move, MOVE)
#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
}
APPEND_CMD_LEN(seq_store, SEQ_STORE)
APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
}
APPEND_CMD_PTR(key, KEY)
APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(store, STORE)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_data(desc, data, len, CMD_##op | options); \
}
APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
/*
* 2nd variant for commands whose specified immediate length differs
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
append_data(desc, data, data_len); \
}
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
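To see how these helpers compose, here is a hypothetical fragment (the option constants come from desc.h, which is not shown in this hunk) that builds a small job descriptor loading a class 1 key and starting an AES-CBC encrypt operation:

	u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];

	init_job_desc(desc, 0);
	/* class 1 key, keylen bytes, at DMA address key_dma */
	append_key(desc, key_dma, keylen, CLASS_1 | KEY_DEST_CLASS_REG);
	/* start the AES-CBC encrypt operation */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			 OP_ALG_AAI_CBC | OP_ALG_ENCRYPT);

Each append_* call writes one command word (plus any pointer or immediate data) at desc_end() and bumps the length field kept in the header word.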
/*
* CAAM Error Reporting
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*/
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc.h"
#include "jr.h"
#include "error.h"
#define SPRINTFCAT(str, format, param, max_alloc) \
{ \
char *tmp; \
\
tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
sprintf(tmp, format, param); \
strcat(str, tmp); \
kfree(tmp); \
}
static void report_jump_idx(u32 status, char *outstr)
{
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
JRSTA_DECOERR_INDEX_SHIFT;
if (status & JRSTA_DECOERR_JUMP)
strcat(outstr, "jump tgt desc idx ");
else
strcat(outstr, "desc idx ");
SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
}
static void report_ccb_status(u32 status, char *outstr)
{
char *cha_id_list[] = {
"",
"AES",
"DES, 3DES",
"ARC4",
"MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512",
"RNG",
"SNOW f8",
"Kasumi f8, f9",
"All Public Key Algorithms",
"CRC",
"SNOW f9",
};
char *err_id_list[] = {
"None. No error.",
"Mode error.",
"Data size error.",
"Key size error.",
"PKHA A memory size error.",
"PKHA B memory size error.",
"Data arrived out of sequence error.",
"PKHA divide-by-zero error.",
"PKHA modulus even error.",
"DES key parity error.",
"ICV check failed.",
"Hardware error.",
"Unsupported CCM AAD size.",
"Class 1 CHA is not reset",
"Invalid CHA combination was selected",
"Invalid CHA selected.",
};
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
JRSTA_CCBERR_CHAID_SHIFT;
u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
report_jump_idx(status, outstr);
if (cha_id < ARRAY_SIZE(cha_id_list)) {
SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
strlen(cha_id_list[cha_id]));
} else {
SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
cha_id, sizeof("ff"));
}
if (err_id < ARRAY_SIZE(err_id_list)) {
SPRINTFCAT(outstr, "%s", err_id_list[err_id],
strlen(err_id_list[err_id]));
} else {
SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
err_id, sizeof("ff"));
}
}
static void report_jump_status(u32 status, char *outstr)
{
SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
}
static void report_deco_status(u32 status, char *outstr)
{
const struct {
u8 value;
char *error_text;
} desc_error_list[] = {
{ 0x00, "None. No error." },
{ 0x01, "SGT Length Error. The descriptor is trying to read "
"more data than is contained in the SGT table." },
{ 0x02, "Reserved." },
{ 0x03, "Job Ring Control Error. There is a bad value in the "
"Job Ring Control register." },
{ 0x04, "Invalid Descriptor Command. The Descriptor Command "
"field is invalid." },
{ 0x05, "Reserved." },
{ 0x06, "Invalid KEY Command" },
{ 0x07, "Invalid LOAD Command" },
{ 0x08, "Invalid STORE Command" },
{ 0x09, "Invalid OPERATION Command" },
{ 0x0A, "Invalid FIFO LOAD Command" },
{ 0x0B, "Invalid FIFO STORE Command" },
{ 0x0C, "Invalid MOVE Command" },
{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
"invalid because the target is not a Job Header "
"Command, or the jump is from a Trusted Descriptor to "
"a Job Descriptor, or because the target Descriptor "
"contains a Shared Descriptor." },
{ 0x0E, "Invalid MATH Command" },
{ 0x0F, "Invalid SIGNATURE Command" },
{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
"Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
"LOAD, or SEQ FIFO STORE decremented the input or "
"output sequence length below 0. This error may result "
"if a built-in PROTOCOL Command has encountered a "
"malformed PDU." },
{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
{ 0x12, "Shared Descriptor Header Error" },
{ 0x13, "Header Error. Invalid length or parity, or certain "
"other problems." },
{ 0x14, "Burster Error. Burster has gotten to an illegal "
"state" },
{ 0x15, "Context Register Length Error. The descriptor is "
"trying to read or write past the end of the Context "
"Register. A SEQ LOAD or SEQ STORE with the VLF bit "
"set was executed with too large a length in the "
"variable length register (VSOL for SEQ STORE or VSIL "
"for SEQ LOAD)." },
{ 0x16, "DMA Error" },
{ 0x17, "Reserved." },
{ 0x1A, "Job failed due to JR reset" },
{ 0x1B, "Job failed due to Fail Mode" },
{ 0x1C, "DECO Watchdog timer timeout error" },
{ 0x1D, "DECO tried to copy a key from another DECO but the "
"other DECO's Key Registers were locked" },
{ 0x1E, "DECO attempted to copy data from a DECO that had an "
"unmasked Descriptor error" },
{ 0x1F, "LIODN error. DECO was trying to share from itself or "
"from another DECO but the two Non-SEQ LIODN values "
"didn't match or the 'shared from' DECO's Descriptor "
"required that the SEQ LIODNs be the same and they "
"aren't." },
{ 0x20, "DECO has completed a reset initiated via the DRR "
"register" },
{ 0x21, "Nonce error. When using EKT (CCM) key encryption "
"option in the FIFO STORE Command, the Nonce counter "
"reached its maximum value and this encryption mode "
"can no longer be used." },
{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
"(input frame; block ciphers) and IPsec decap (output "
"frame, when doing the next header byte update) and "
"DCRC (output frame)." },
{ 0x80, "DNR (do not run) error" },
{ 0x81, "undefined protocol command" },
{ 0x82, "invalid setting in PDB" },
{ 0x83, "Anti-replay LATE error" },
{ 0x84, "Anti-replay REPLAY error" },
{ 0x85, "Sequence number overflow" },
{ 0x86, "Sigver invalid signature" },
{ 0x87, "DSA Sign Illegal test descriptor" },
{ 0x88, "Protocol Format Error - A protocol has seen an error "
"in the format of data received. When running RSA, "
"this means that formatting with random padding was "
"used, and did not follow the form: 0x00, 0x02, 8-to-N "
"bytes of non-zero pad, 0x00, F data." },
{ 0x89, "Protocol Size Error - A protocol has seen an error in "
"size. When running RSA, pdb size N < (size of F) when "
"no formatting is used; or pdb size N < (F + 11) when "
"formatting is used." },
{ 0xC1, "Blob Command error: Undefined mode" },
{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
{ 0xC4, "Blob Command error: Black Blob key or input size "
"error" },
{ 0xC5, "Blob Command error: Invalid key destination" },
{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
{ 0xF0, "IPsec TTL or hop limit field either came in as 0, "
"or was decremented to 0" },
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
int i;
report_jump_idx(status, outstr);
for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
if (desc_error_list[i].value == desc_error)
break;
if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
strlen(desc_error_list[i].error_text));
} else {
SPRINTFCAT(outstr, "unidentified error value 0x%02x",
desc_error, sizeof("ff"));
}
}
static void report_jr_status(u32 status, char *outstr)
{
SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
}
static void report_cond_code_status(u32 status, char *outstr)
{
SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
}
char *caam_jr_strstatus(char *outstr, u32 status)
{
struct stat_src {
void (*report_ssed)(u32 status, char *outstr);
char *error;
} status_src[] = {
{ NULL, "No error" },
{ NULL, NULL },
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
{ NULL, NULL },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
};
u32 ssrc = status >> JRSTA_SSRC_SHIFT;
/*
 * The 4-bit ssrc field can index past the eight-entry table above, and
 * two of its entries are deliberately NULL; guard both cases before use.
 */
if (ssrc >= ARRAY_SIZE(status_src) || !status_src[ssrc].error) {
	sprintf(outstr, "unknown error source 0x%x", ssrc);
	return outstr;
}
sprintf(outstr, "%s: ", status_src[ssrc].error);
if (status_src[ssrc].report_ssed)
	status_src[ssrc].report_ssed(status, outstr);
return outstr;
}
EXPORT_SYMBOL(caam_jr_strstatus);
/*
* CAAM Error Reporting code header
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*/
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
extern char *caam_jr_strstatus(char *outstr, u32 status);
#endif /* CAAM_ERROR_H */
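A minimal caller sketch for the interface above (hypothetical report_caam_status() and dev; not part of the driver): the caller owns the output string, which is why CAAM_ERROR_STR_MAX is exported alongside the prototype.

	/* Hypothetical usage sketch: decode a job ring status word. */
	static void report_caam_status(struct device *dev, u32 status)
	{
		char outstr[CAAM_ERROR_STR_MAX];	/* caller-owned storage */

		if (status)
			dev_err(dev, "job status: %s\n",
				caam_jr_strstatus(outstr, status));
	}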
/*
* CAAM/SEC 4.x driver backend
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
*/
#ifndef INTERN_H
#define INTERN_H
#define JOBR_UNASSIGNED 0
#define JOBR_ASSIGNED 1
/* Ring depth comes from a Kconfig param; the driver requires a power of two */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
/* Kconfig params for interrupt coalescing if selected (else zero) */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
#define JOBR_INTC JRCFG_ICEN
#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
#else
#define JOBR_INTC 0
#define JOBR_INTC_TIME_THLD 0
#define JOBR_INTC_COUNT_THLD 0
#endif
/*
* Storage for tracking each in-process entry moving across a ring
* Each entry on an output ring needs one of these
*/
struct caam_jrentry_info {
void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
void *cbkarg; /* Argument per ring entry */
u32 *desc_addr_virt; /* Stored virt addr for postprocessing */
dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */
u32 desc_size; /* Stored size for postprocessing, header derived */
};
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct device *parentdev; /* points back to controller dev */
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask[NR_CPUS];
int irq; /* One per queue */
int assign; /* busy/free */
/* Job ring info */
int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
int inp_ring_write_index; /* Input index "tail" */
int head; /* entinfo (s/w ring) head index */
dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
struct jr_outentry *outring; /* Base of output ring, DMA-safe */
};
/*
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
struct device *dev;
struct device **jrdev; /* Alloc'ed array per sub-device */
spinlock_t jr_alloc_lock;
struct platform_device *pdev;
/* Physical-presence section */
struct caam_ctrl *ctrl; /* controller region */
struct caam_deco **deco; /* DECO/CCB views */
struct caam_assurance *ac;
struct caam_queue_if *qi; /* QI control region */
/*
* Detected geometry block. Filled in from device tree if powerpc,
* or from register-based version detection code
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
/* which JRs are allocated to scatterlist crypto */
atomic_t tfm_count ____cacheline_aligned;
int num_jrs_for_algapi;
struct device **algapi_jr;
/* list of registered crypto algorithms (mk generic context handle?) */
struct list_head alg_list;
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
*/
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
};
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
#endif /* INTERN_H */
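The power-of-two constraint on JOBR_DEPTH noted above lets ring indices wrap with a mask instead of a modulo; a sketch of the idiom (hypothetical ring_next() helper, not taken from the driver):

	/*
	 * With depth == 1 << n, "(idx + 1) & (depth - 1)" wraps the index
	 * back to 0 after depth - 1 without a division. Valid only when
	 * depth is a power of two.
	 */
	static inline int ring_next(int idx, int depth)
	{
		return (idx + 1) & (depth - 1);
	}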
This diff is collapsed.
/*
* CAAM public-level include definitions for the JobR backend
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#ifndef JR_H
#define JR_H
/* Prototypes for backend-level services exposed to APIs */
int caam_jr_register(struct device *ctrldev, struct device **rdev);
int caam_jr_deregister(struct device *rdev);
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
void *areq),
void *areq);
extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
int ring);
extern int caam_jr_shutdown(struct device *dev);
#endif /* JR_H */
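A submission sketch against these prototypes (hypothetical my_jr_done() and my_submit(); error handling abbreviated): caam_jr_enqueue() is asynchronous, so status decoding and completion happen in the callback.

	/* Hypothetical completion callback matching the cbk prototype above. */
	static void my_jr_done(struct device *dev, u32 *desc, u32 status,
			       void *areq)
	{
		char tmp[CAAM_ERROR_STR_MAX];

		if (status)
			dev_err(dev, "%s\n", caam_jr_strstatus(tmp, status));
		/* complete(areq) or equivalent post-processing goes here */
	}

	/* Hypothetical submission path; jrdev comes from caam_jr_register(). */
	static int my_submit(struct device *jrdev, u32 *desc, void *areq)
	{
		int err;

		err = caam_jr_enqueue(jrdev, desc, my_jr_done, areq);
		if (err)
			return err;	/* e.g. ring full; caller may retry */

		return -EINPROGRESS;	/* completion arrives in my_jr_done() */
	}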
This diff is collapsed.
drivers/crypto/mv_cesa.c
@@ -133,7 +133,6 @@ struct mv_req_hash_ctx {
 	int extra_bytes;	/* unprocessed bytes in buffer */
 	enum hash_op op;
 	int count_add;
-	struct scatterlist dummysg;
 };
 
 static void compute_aes_dec_key(struct mv_ctx *ctx)
@@ -187,9 +186,9 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 {
 	int ret;
 	void *sbuf;
-	int copied = 0;
+	int copy_len;
 
-	while (1) {
+	while (len) {
 		if (!p->sg_src_left) {
 			ret = sg_miter_next(&p->src_sg_it);
 			BUG_ON(!ret);
@@ -199,19 +198,14 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
 
 		sbuf = p->src_sg_it.addr + p->src_start;
 
-		if (p->sg_src_left <= len - copied) {
-			memcpy(dbuf + copied, sbuf, p->sg_src_left);
-			copied += p->sg_src_left;
-			p->sg_src_left = 0;
-			if (copied >= len)
-				break;
-		} else {
-			int copy_len = len - copied;
-			memcpy(dbuf + copied, sbuf, copy_len);
-			p->src_start += copy_len;
-			p->sg_src_left -= copy_len;
-			break;
-		}
+		copy_len = min(p->sg_src_left, len);
+		memcpy(dbuf, sbuf, copy_len);
+
+		p->src_start += copy_len;
+		p->sg_src_left -= copy_len;
+
+		len -= copy_len;
+		dbuf += copy_len;
 	}
 }
@@ -275,7 +269,6 @@ static void mv_process_current_q(int first_block)
 
 	memcpy(cpg->sram + SRAM_CONFIG, &op,
 			sizeof(struct sec_accel_config));
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 	/* GO */
 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -302,6 +295,7 @@ static void mv_crypto_algo_completion(void)
 static void mv_process_hash_current(int first_block)
 {
 	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
 	struct req_progress *p = &cpg->p;
 	struct sec_accel_config op = { 0 };
@@ -314,6 +308,8 @@ static void mv_process_hash_current(int first_block)
 		break;
 	case COP_HMAC_SHA1:
 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
+				tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
 		break;
 	}
 
@@ -345,11 +341,16 @@ static void mv_process_hash_current(int first_block)
 			op.config |= CFG_LAST_FRAG;
 		else
 			op.config |= CFG_MID_FRAG;
+
+		writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+		writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+		writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+		writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+		writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
 	}
 
 	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
-	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 	/* GO */
 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
 
@@ -409,12 +410,6 @@ static void mv_hash_algo_completion(void)
 		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
 	sg_miter_stop(&cpg->p.src_sg_it);
 
-	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
-	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
-	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
-	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
-	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-
 	if (likely(ctx->last_chunk)) {
 		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
 			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
@@ -422,6 +417,12 @@ static void mv_hash_algo_completion(void)
 			       (req)));
 		} else
 			mv_hash_final_fallback(req);
+	} else {
+		ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+		ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+		ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+		ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+		ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
 	}
 }
@@ -480,7 +481,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 	int i = 0;
 	size_t cur_len;
 
-	while (1) {
+	while (sl) {
 		cur_len = sl[i].length;
 		++i;
 		if (total_bytes > cur_len)
@@ -517,29 +518,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 {
 	struct req_progress *p = &cpg->p;
 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
 	int num_sgs, hw_bytes, old_extra_bytes, rc;
 	cpg->cur_req = &req->base;
 	memset(p, 0, sizeof(struct req_progress));
 	hw_bytes = req->nbytes + ctx->extra_bytes;
 	old_extra_bytes = ctx->extra_bytes;
 
-	if (unlikely(ctx->extra_bytes)) {
-		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
-		       ctx->extra_bytes);
-		p->crypt_len = ctx->extra_bytes;
-	}
-
-	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
-
-	if (unlikely(!ctx->first_hash)) {
-		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
-		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
-		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
-		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
-		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
-	}
-
 	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
 	if (ctx->extra_bytes != 0
 	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
@@ -555,6 +539,12 @@ static void mv_start_new_hash_req(struct ahash_request *req)
 		p->complete = mv_hash_algo_completion;
 		p->process = mv_process_hash_current;
 
+		if (unlikely(old_extra_bytes)) {
+			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+			       old_extra_bytes);
+			p->crypt_len = old_extra_bytes;
+		}
+
 		mv_process_hash_current(1);
 	} else {
 		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
@@ -603,9 +593,7 @@ static int queue_manag(void *data)
 			if (async_req->tfm->__crt_alg->cra_type !=
 			    &crypto_ahash_type) {
 				struct ablkcipher_request *req =
-				    container_of(async_req,
-						 struct ablkcipher_request,
-						 base);
+				    ablkcipher_request_cast(async_req);
 				mv_start_new_crypt_req(req);
 			} else {
 				struct ahash_request *req =
@@ -722,19 +710,13 @@ static int mv_hash_update(struct ahash_request *req)
 static int mv_hash_final(struct ahash_request *req)
 {
 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-	/* dummy buffer of 4 bytes */
-	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
-	/* I think I'm allowed to do that... */
-	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+
 	mv_update_hash_req_ctx(ctx, 1, 0);
 	return mv_handle_req(&req->base);
 }
 
 static int mv_hash_finup(struct ahash_request *req)
 {
-	if (!req->nbytes)
-		return mv_hash_final(req);
-
 	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
 	return mv_handle_req(&req->base);
 }
@@ -1065,14 +1047,21 @@ static int mv_probe(struct platform_device *pdev)
 
 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
 
 	ret = crypto_register_alg(&mv_aes_alg_ecb);
-	if (ret)
+	if (ret) {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register aes-ecb driver\n");
 		goto err_irq;
+	}
 
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
-	if (ret)
+	if (ret) {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register aes-cbc driver\n");
 		goto err_unreg_ecb;
+	}
 
 	ret = crypto_register_ahash(&mv_sha1_alg);
 	if (ret == 0)
...
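The copy_src_to_buf() rewrite above collapses a two-branch copy into a single min()-based loop. The same shape as a standalone sketch (hypothetical struct seg and copy_from_segs(); not driver code):

	/*
	 * Each pass copies min(bytes left in this segment, bytes still
	 * wanted), so neither side can be overrun; an exhausted segment
	 * advances to the next one, as sg_miter_next() does in the driver.
	 */
	struct seg {
		const char *buf;
		size_t left;
	};

	static void copy_from_segs(struct seg *s, char *dbuf, size_t len)
	{
		while (len) {
			size_t copy_len = min(s->left, len);

			memcpy(dbuf, s->buf, copy_len);
			s->buf += copy_len;
			s->left -= copy_len;
			dbuf += copy_len;
			len -= copy_len;
			if (!s->left)
				s++;
		}
	}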
drivers/crypto/omap-sham.c
@@ -78,7 +78,6 @@
 #define FLAGS_SHA1		0x0010
 #define FLAGS_DMA_ACTIVE	0x0020
 #define FLAGS_OUTPUT_READY	0x0040
-#define FLAGS_CLEAN		0x0080
 #define FLAGS_INIT		0x0100
 #define FLAGS_CPU		0x0200
 #define FLAGS_HMAC		0x0400
@@ -511,26 +510,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 	return 0;
 }
 
-static void omap_sham_cleanup(struct ahash_request *req)
-{
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	struct omap_sham_dev *dd = ctx->dd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->lock, flags);
-	if (ctx->flags & FLAGS_CLEAN) {
-		spin_unlock_irqrestore(&dd->lock, flags);
-		return;
-	}
-	ctx->flags |= FLAGS_CLEAN;
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (ctx->digcnt)
-		omap_sham_copy_ready_hash(req);
-
-	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
-}
-
 static int omap_sham_init(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -618,9 +597,8 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
 	return err;
 }
 
-static int omap_sham_finish_req_hmac(struct ahash_request *req)
+static int omap_sham_finish_hmac(struct ahash_request *req)
 {
-	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_hmac_ctx *bctx = tctx->base;
 	int bs = crypto_shash_blocksize(bctx->shash);
@@ -635,7 +613,24 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req)
 
 	return crypto_shash_init(&desc.shash) ?:
 	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
+	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static int omap_sham_finish(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+	int err = 0;
+
+	if (ctx->digcnt) {
+		omap_sham_copy_ready_hash(req);
+		if (ctx->flags & FLAGS_HMAC)
+			err = omap_sham_finish_hmac(req);
+	}
+
+	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+
+	return err;
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
@@ -645,15 +640,12 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
 
 	if (!err) {
 		omap_sham_copy_hash(ctx->dd->req, 1);
-		if (ctx->flags & FLAGS_HMAC)
-			err = omap_sham_finish_req_hmac(req);
+		if (ctx->flags & FLAGS_FINAL)
+			err = omap_sham_finish(req);
 	} else {
 		ctx->flags |= FLAGS_ERROR;
 	}
 
-	if ((ctx->flags & FLAGS_FINAL) || err)
-		omap_sham_cleanup(req);
-
 	clk_disable(dd->iclk);
 	dd->flags &= ~FLAGS_BUSY;
@@ -809,22 +801,21 @@ static int omap_sham_final_shash(struct ahash_request *req)
 static int omap_sham_final(struct ahash_request *req)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int err = 0;
 
 	ctx->flags |= FLAGS_FINUP;
 
-	if (!(ctx->flags & FLAGS_ERROR)) {
-		/* OMAP HW accel works only with buffers >= 9 */
-		/* HMAC is always >= 9 because of ipad */
-		if ((ctx->digcnt + ctx->bufcnt) < 9)
-			err = omap_sham_final_shash(req);
-		else if (ctx->bufcnt)
-			return omap_sham_enqueue(req, OP_FINAL);
-	}
+	if (ctx->flags & FLAGS_ERROR)
+		return 0; /* uncompleted hash is not needed */
 
-	omap_sham_cleanup(req);
+	/* OMAP HW accel works only with buffers >= 9 */
+	/* HMAC is always >= 9 because ipad == block size */
+	if ((ctx->digcnt + ctx->bufcnt) < 9)
+		return omap_sham_final_shash(req);
+	else if (ctx->bufcnt)
+		return omap_sham_enqueue(req, OP_FINAL);
 
-	return err;
+	/* copy ready hash (+ finalize hmac) */
+	return omap_sham_finish(req);
 }
 
 static int omap_sham_finup(struct ahash_request *req)
@@ -835,7 +826,7 @@ static int omap_sham_finup(struct ahash_request *req)
 	ctx->flags |= FLAGS_FINUP;
 
 	err1 = omap_sham_update(req);
-	if (err1 == -EINPROGRESS)
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
 		return err1;
 
 	/*
 	 * final() has to be always called to cleanup resources
@@ -890,8 +881,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 	const char *alg_name = crypto_tfm_alg_name(tfm);
 
-	pr_info("enter\n");
-
 	/* Allocate a fallback and abort if it failed. */
 	tctx->fallback = crypto_alloc_shash(alg_name, 0,
 					    CRYPTO_ALG_NEED_FALLBACK);
@@ -1297,7 +1286,8 @@ static int __init omap_sham_mod_init(void)
 	pr_info("loading %s driver\n", "omap-sham");
 
 	if (!cpu_class_is_omap2() ||
-	    omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+	    (omap_type() != OMAP2_DEVICE_TYPE_SEC &&
+	     omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
 		pr_err("Unsupported cpu\n");
 		return -ENODEV;
 	}
...
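The omap_sham_finish_hmac() chain above relies on the GNU "a ?: b" extension: each crypto_shash_*() call runs only if every call before it returned 0, and the first nonzero error is what gets returned. An equivalent open-coded form of that return statement, reusing the function's own variables:

	err = crypto_shash_init(&desc.shash);
	if (!err)
		err = crypto_shash_update(&desc.shash, bctx->opad, bs);
	if (!err)
		err = crypto_shash_finup(&desc.shash, req->result, ds,
					 req->result);
	return err;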
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.