Commit 3604a7f5 authored by Linus Torvalds

Merge tag 'v6.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Feed untrusted RNGs into /dev/random
   - Allow HWRNG sleeping to be more interruptible
   - Create lib/utils module
   - Setting private keys no longer required for akcipher
   - Remove tcrypt mode=1000
   - Reorganised Kconfig entries

  Algorithms:
   - Load x86/sha512 based on CPU features
   - Add AES-NI/AVX/x86_64/GFNI assembler implementation of aria cipher

  Drivers:
   - Add Aspeed HACE crypto driver"

* tag 'v6.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
  crypto: aspeed - Remove redundant dev_err call
  crypto: scatterwalk - Remove unused inline function scatterwalk_aligned()
  crypto: aead - Remove unused inline functions from aead
  crypto: bcm - Simplify obtain the name for cipher
  crypto: marvell/octeontx - use sysfs_emit() to instead of scnprintf()
  hwrng: core - start hwrng kthread also for untrusted sources
  crypto: zip - remove the unneeded result variable
  crypto: qat - add limit to linked list parsing
  crypto: octeontx2 - Remove the unneeded result variable
  crypto: ccp - Remove the unneeded result variable
  crypto: aspeed - Fix check for platform_get_irq() errors
  crypto: virtio - fix memory-leak
  crypto: cavium - prevent integer overflow loading firmware
  crypto: marvell/octeontx - prevent integer overflows
  crypto: aspeed - fix build error when only CRYPTO_DEV_ASPEED is enabled
  crypto: hisilicon/qm - fix the qos value initialization
  crypto: sun4i-ss - use DEFINE_SHOW_ATTRIBUTE to simplify sun4i_ss_debugfs
  crypto: tcrypt - add async speed test for aria cipher
  crypto: aria-avx - add AES-NI/AVX/x86_64/GFNI assembler implementation of aria cipher
  crypto: aria - prepare generic module for optimized implementations
  ...
parents d4013bc4 b411b1a0
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/aspeed,ast2500-hace.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ASPEED HACE hash and crypto Hardware Accelerator Engines
maintainers:
  - Neal Liu <neal_liu@aspeedtech.com>

description: |
  The Hash and Crypto Engine (HACE) is designed to accelerate the throughput
  of hash data digest, encryption, and decryption. Basically, HACE can be
  divided into two independent engines - Hash Engine and Crypto Engine.

properties:
  compatible:
    enum:
      - aspeed,ast2500-hace
      - aspeed,ast2600-hace

  reg:
    maxItems: 1

  clocks:
    maxItems: 1

  interrupts:
    maxItems: 1

  resets:
    maxItems: 1

required:
  - compatible
  - reg
  - clocks
  - interrupts
  - resets

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/ast2600-clock.h>
    hace: crypto@1e6d0000 {
        compatible = "aspeed,ast2600-hace";
        reg = <0x1e6d0000 0x200>;
        interrupts = <4>;
        clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
        resets = <&syscon ASPEED_RESET_HACE>;
    };
......@@ -89,9 +89,8 @@ context. In a typical workflow, this command should be the first command issued.
The firmware can be initialized either by using its own non-volatile storage or
the OS can manage the NV storage for the firmware using the module parameter
-``init_ex_path``. The file specified by ``init_ex_path`` must exist. To create
-a new NV storage file, allocate the file with 32KB of 0xFF as required by
-the SEV spec.
+``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
+is invalid, the OS will create or overwrite the file with output from the PSP.
Returns: 0 on success, a negative error code on failure
......
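The removed paragraph above required callers to pre-allocate the NV file themselves; with the new behaviour the OS creates or overwrites it from PSP output. For older kernels that still enforce the old rule, a minimal userspace sketch of the 32KB-of-0xFF pre-allocation (the path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32 * 1024];	/* 32KB of 0xFF, per the SEV spec */
	int fd;

	fd = open("/var/lib/sev/nv_storage", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	memset(buf, 0xFF, sizeof(buf));
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return 0;
}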
......@@ -3237,6 +3237,13 @@ S: Maintained
F: Documentation/devicetree/bindings/usb/aspeed,ast2600-udc.yaml
F: drivers/usb/gadget/udc/aspeed_udc.c
ASPEED CRYPTO DRIVER
M: Neal Liu <neal_liu@aspeedtech.com>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml
F: drivers/crypto/aspeed/
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
L: acpi4asus-user@lists.sourceforge.net
......
......@@ -1850,8 +1850,4 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
if CRYPTO
source "arch/arm/crypto/Kconfig"
endif
source "arch/arm/Kconfig.assembler"
......@@ -262,6 +262,14 @@ rng: hwrng@1e6e2078 {
quality = <100>;
};
hace: crypto@1e6e3000 {
compatible = "aspeed,ast2500-hace";
reg = <0x1e6e3000 0x100>;
interrupts = <4>;
clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
resets = <&syscon ASPEED_RESET_HACE>;
};
gfx: display@1e6e6000 {
compatible = "aspeed,ast2500-gfx", "syscon";
reg = <0x1e6e6000 0x1000>;
......
......@@ -323,6 +323,14 @@ apb {
#size-cells = <1>;
ranges;
hace: crypto@1e6d0000 {
compatible = "aspeed,ast2600-hace";
reg = <0x1e6d0000 0x200>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
resets = <&syscon ASPEED_RESET_HACE>;
};
syscon: syscon@1e6e2000 {
compatible = "aspeed,ast2600-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1000>;
......
......@@ -32,7 +32,6 @@ CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_ENERGY_MODEL=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
......
......@@ -44,7 +44,6 @@ CONFIG_ARM_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
CONFIG_CRYPTO_SHA2_ARM_CE=m
......
......@@ -132,7 +132,6 @@ CONFIG_ARM_EXYNOS_CPUIDLE=y
CONFIG_ARM_TEGRA_CPUIDLE=y
CONFIG_ARM_QCOM_SPM_CPUIDLE=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
CONFIG_CRYPTO_SHA2_ARM_CE=m
......
......@@ -53,7 +53,6 @@ CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_DEBUG=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
......
......@@ -34,7 +34,6 @@ CONFIG_CPUFREQ_DT=m
CONFIG_ARM_PXA2xx_CPUFREQ=m
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
......
......@@ -2251,6 +2251,3 @@ source "drivers/acpi/Kconfig"
source "arch/arm64/kvm/Kconfig"
if CRYPTO
source "arch/arm64/crypto/Kconfig"
endif # CRYPTO
......@@ -112,7 +112,6 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_SHA512_ARM64_CE=m
......
# SPDX-License-Identifier: GPL-2.0

menu "Accelerated Cryptographic Algorithms for CPU (mips)"

config CRYPTO_CRC32_MIPS
	tristate "CRC32c and CRC32"
	depends on MIPS_CRC_SUPPORT
	select CRYPTO_HASH
	help
	  CRC32c and CRC32 CRC algorithms

	  Architecture: mips

config CRYPTO_POLY1305_MIPS
	tristate "Hash functions: Poly1305"
	depends on MIPS
	select CRYPTO_ARCH_HAVE_LIB_POLY1305
	help
	  Poly1305 authenticator algorithm (RFC7539)

	  Architecture: mips

config CRYPTO_MD5_OCTEON
	tristate "Digests: MD5 (OCTEON)"
	depends on CPU_CAVIUM_OCTEON
	select CRYPTO_MD5
	select CRYPTO_HASH
	help
	  MD5 message digest algorithm (RFC1321)

	  Architecture: mips OCTEON using crypto instructions, when available

config CRYPTO_SHA1_OCTEON
	tristate "Hash functions: SHA-1 (OCTEON)"
	depends on CPU_CAVIUM_OCTEON
	select CRYPTO_SHA1
	select CRYPTO_HASH
	help
	  SHA-1 secure hash algorithm (FIPS 180)

	  Architecture: mips OCTEON

config CRYPTO_SHA256_OCTEON
	tristate "Hash functions: SHA-224 and SHA-256 (OCTEON)"
	depends on CPU_CAVIUM_OCTEON
	select CRYPTO_SHA256
	select CRYPTO_HASH
	help
	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180)

	  Architecture: mips OCTEON using crypto instructions, when available

config CRYPTO_SHA512_OCTEON
	tristate "Hash functions: SHA-384 and SHA-512 (OCTEON)"
	depends on CPU_CAVIUM_OCTEON
	select CRYPTO_SHA512
	select CRYPTO_HASH
	help
	  SHA-384 and SHA-512 secure hash algorithms (FIPS 180)

	  Architecture: mips OCTEON using crypto instructions, when available

config CRYPTO_CHACHA_MIPS
	tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (MIPS32r2)"
	depends on CPU_MIPS32_R2
	select CRYPTO_SKCIPHER
	select CRYPTO_ARCH_HAVE_LIB_CHACHA
	help
	  Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
	  stream cipher algorithms

	  Architecture: MIPS32r2

endmenu
# SPDX-License-Identifier: GPL-2.0

menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"

config CRYPTO_CRC32C_VPMSUM
	tristate "CRC32c"
	depends on PPC64 && ALTIVEC
	select CRYPTO_HASH
	select CRC32
	help
	  CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)

	  Architecture: powerpc64 using
	  - AltiVec extensions

	  Enable on POWER8 and newer processors for improved performance.

config CRYPTO_CRCT10DIF_VPMSUM
	tristate "CRC32T10DIF"
	depends on PPC64 && ALTIVEC && CRC_T10DIF
	select CRYPTO_HASH
	help
	  CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)

	  Architecture: powerpc64 using
	  - AltiVec extensions

	  Enable on POWER8 and newer processors for improved performance.

config CRYPTO_VPMSUM_TESTER
	tristate "CRC32c and CRC32T10DIF hardware acceleration tester"
	depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
	help
	  Stress test for CRC32c and CRCT10DIF algorithms implemented with
	  powerpc64 AltiVec extensions (POWER8 vpmsum instructions).

	  Unless you are testing these algorithms, you don't need this.

config CRYPTO_MD5_PPC
	tristate "Digests: MD5"
	depends on PPC
	select CRYPTO_HASH
	help
	  MD5 message digest algorithm (RFC1321)

	  Architecture: powerpc

config CRYPTO_SHA1_PPC
	tristate "Hash functions: SHA-1"
	depends on PPC
	help
	  SHA-1 secure hash algorithm (FIPS 180)

	  Architecture: powerpc

config CRYPTO_SHA1_PPC_SPE
	tristate "Hash functions: SHA-1 (SPE)"
	depends on PPC && SPE
	help
	  SHA-1 secure hash algorithm (FIPS 180)

	  Architecture: powerpc using
	  - SPE (Signal Processing Engine) extensions

config CRYPTO_SHA256_PPC_SPE
	tristate "Hash functions: SHA-224 and SHA-256 (SPE)"
	depends on PPC && SPE
	select CRYPTO_SHA256
	select CRYPTO_HASH
	help
	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180)

	  Architecture: powerpc using
	  - SPE (Signal Processing Engine) extensions

config CRYPTO_AES_PPC_SPE
	tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
	depends on PPC && SPE
	select CRYPTO_SKCIPHER
	help
	  Block ciphers: AES cipher algorithms (FIPS-197)
	  Length-preserving ciphers: AES with ECB, CBC, CTR, and XTS modes

	  Architecture: powerpc using:
	  - SPE (Signal Processing Engine) extensions

	  SPE is available for:
	  - Processor Type: Freescale 8500
	  - CPU selection: e500 (8540)

	  This module should only be used for low power (router) devices
	  without hardware AES acceleration (e.g. caam crypto). It reduces the
	  size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
	  timing attacks. Nevertheless it might not be as secure as other
	  architecture specific assembler implementations that work on 1KB
	  tables or 256 byte S-boxes.

endmenu
# SPDX-License-Identifier: GPL-2.0

menu "Accelerated Cryptographic Algorithms for CPU (s390)"

config CRYPTO_CRC32_S390
	tristate "CRC32c and CRC32"
	depends on S390
	select CRYPTO_HASH
	select CRC32
	help
	  CRC32c and CRC32 CRC algorithms

	  Architecture: s390

	  It is available with IBM z13 or later.

config CRYPTO_SHA512_S390
	tristate "Hash functions: SHA-384 and SHA-512"
	depends on S390
	select CRYPTO_HASH
	help
	  SHA-384 and SHA-512 secure hash algorithms (FIPS 180)

	  Architecture: s390

	  It is available as of z10.

config CRYPTO_SHA1_S390
	tristate "Hash functions: SHA-1"
	depends on S390
	select CRYPTO_HASH
	help
	  SHA-1 secure hash algorithm (FIPS 180)

	  Architecture: s390

	  It is available as of z990.

config CRYPTO_SHA256_S390
	tristate "Hash functions: SHA-224 and SHA-256"
	depends on S390
	select CRYPTO_HASH
	help
	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180)

	  Architecture: s390

	  It is available as of z9.

config CRYPTO_SHA3_256_S390
	tristate "Hash functions: SHA3-224 and SHA3-256"
	depends on S390
	select CRYPTO_HASH
	help
	  SHA3-224 and SHA3-256 secure hash algorithms (FIPS 202)

	  Architecture: s390

	  It is available as of z14.

config CRYPTO_SHA3_512_S390
	tristate "Hash functions: SHA3-384 and SHA3-512"
	depends on S390
	select CRYPTO_HASH
	help
	  SHA3-384 and SHA3-512 secure hash algorithms (FIPS 202)

	  Architecture: s390

	  It is available as of z14.

config CRYPTO_GHASH_S390
	tristate "Hash functions: GHASH"
	depends on S390
	select CRYPTO_HASH
	help
	  GCM GHASH hash function (NIST SP800-38D)

	  Architecture: s390

	  It is available as of z196.

config CRYPTO_AES_S390
	tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM"
	depends on S390
	select CRYPTO_ALGAPI
	select CRYPTO_SKCIPHER
	help
	  Block cipher: AES cipher algorithms (FIPS 197)
	  AEAD cipher: AES with GCM
	  Length-preserving ciphers: AES with ECB, CBC, XTS, and CTR modes

	  Architecture: s390

	  As of z9 the ECB and CBC modes are hardware accelerated
	  for 128 bit keys.

	  As of z10 the ECB and CBC modes are hardware accelerated
	  for all AES key sizes.

	  As of z196 the CTR mode is hardware accelerated for all AES
	  key sizes and XTS mode is hardware accelerated for 256 and
	  512 bit keys.

config CRYPTO_DES_S390
	tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR"
	depends on S390
	select CRYPTO_ALGAPI
	select CRYPTO_SKCIPHER
	select CRYPTO_LIB_DES
	help
	  Block ciphers: DES (FIPS 46-2) cipher algorithm
	  Block ciphers: Triple DES EDE (FIPS 46-3) cipher algorithm
	  Length-preserving ciphers: DES with ECB, CBC, and CTR modes
	  Length-preserving ciphers: Triple DES EDE with ECB, CBC, and CTR modes

	  Architecture: s390

	  As of z990 the ECB and CBC mode are hardware accelerated.
	  As of z196 the CTR mode is hardware accelerated.

config CRYPTO_CHACHA_S390
	tristate "Ciphers: ChaCha20"
	depends on S390
	select CRYPTO_SKCIPHER
	select CRYPTO_LIB_CHACHA_GENERIC
	select CRYPTO_ARCH_HAVE_LIB_CHACHA
	help
	  Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)

	  Architecture: s390

	  It is available as of z13.

endmenu
# SPDX-License-Identifier: GPL-2.0

menu "Accelerated Cryptographic Algorithms for CPU (sparc64)"

config CRYPTO_DES_SPARC64
	tristate "Ciphers: DES and Triple DES EDE, modes: ECB/CBC"
	depends on SPARC64
	select CRYPTO_ALGAPI
	select CRYPTO_LIB_DES
	select CRYPTO_SKCIPHER
	help
	  Block cipher: DES (FIPS 46-2) cipher algorithm
	  Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
	  Length-preserving ciphers: DES with ECB and CBC modes
	  Length-preserving ciphers: Triple DES EDE with ECB and CBC modes

	  Architecture: sparc64

config CRYPTO_CRC32C_SPARC64
	tristate "CRC32c"
	depends on SPARC64
	select CRYPTO_HASH
	select CRC32
	help
	  CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)

	  Architecture: sparc64

config CRYPTO_MD5_SPARC64
	tristate "Digests: MD5"
	depends on SPARC64
	select CRYPTO_MD5
	select CRYPTO_HASH
	help
	  MD5 message digest algorithm (RFC1321)

	  Architecture: sparc64 using crypto instructions, when available

config CRYPTO_SHA1_SPARC64
	tristate "Hash functions: SHA-1"
	depends on SPARC64
	select CRYPTO_SHA1
	select CRYPTO_HASH
	help
	  SHA-1 secure hash algorithm (FIPS 180)

	  Architecture: sparc64

config CRYPTO_SHA256_SPARC64
	tristate "Hash functions: SHA-224 and SHA-256"
	depends on SPARC64
	select CRYPTO_SHA256
	select CRYPTO_HASH
	help
	  SHA-224 and SHA-256 secure hash algorithms (FIPS 180)

	  Architecture: sparc64 using crypto instructions, when available

config CRYPTO_SHA512_SPARC64
	tristate "Hash functions: SHA-384 and SHA-512"
	depends on SPARC64
	select CRYPTO_SHA512
	select CRYPTO_HASH
	help
	  SHA-384 and SHA-512 secure hash algorithms (FIPS 180)

	  Architecture: sparc64 using crypto instructions, when available

config CRYPTO_AES_SPARC64
	tristate "Ciphers: AES, modes: ECB, CBC, CTR"
	depends on SPARC64
	select CRYPTO_SKCIPHER
	help
	  Block ciphers: AES cipher algorithms (FIPS-197)
	  Length-preserving ciphers: AES with ECB, CBC, and CTR modes

	  Architecture: sparc64 using crypto instructions

config CRYPTO_CAMELLIA_SPARC64
	tristate "Ciphers: Camellia, modes: ECB, CBC"
	depends on SPARC64
	select CRYPTO_ALGAPI
	select CRYPTO_SKCIPHER
	help
	  Block ciphers: Camellia cipher algorithms
	  Length-preserving ciphers: Camellia with ECB and CBC modes

	  Architecture: sparc64

endmenu
......@@ -100,6 +100,9 @@ sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o
obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64) += sm4-aesni-avx2-x86_64.o
sm4-aesni-avx2-x86_64-y := sm4-aesni-avx2-asm_64.o sm4_aesni_avx2_glue.o
obj-$(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) += aria-aesni-avx-x86_64.o
aria-aesni-avx-x86_64-y := aria-aesni-avx-asm_64.o aria_aesni_avx_glue.o
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $< > $@
$(obj)/%.S: $(src)/%.pl FORCE
......
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef ASM_X86_ARIA_AVX_H
#define ASM_X86_ARIA_AVX_H
#include <linux/types.h>
#define ARIA_AESNI_PARALLEL_BLOCKS 16
#define ARIA_AESNI_PARALLEL_BLOCK_SIZE (ARIA_BLOCK_SIZE * 16)
struct aria_avx_ops {
void (*aria_encrypt_16way)(const void *ctx, u8 *dst, const u8 *src);
void (*aria_decrypt_16way)(const void *ctx, u8 *dst, const u8 *src);
void (*aria_ctr_crypt_16way)(const void *ctx, u8 *dst, const u8 *src,
u8 *keystream, u8 *iv);
};
#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Glue Code for the AVX/AES-NI/GFNI assembler implementation of the ARIA Cipher
*
* Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "ecb_cbc_helpers.h"
#include "aria-avx.h"
asmlinkage void aria_aesni_avx_encrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
asmlinkage void aria_aesni_avx_decrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
asmlinkage void aria_aesni_avx_ctr_crypt_16way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
asmlinkage void aria_aesni_avx_gfni_decrypt_16way(const void *ctx, u8 *dst,
const u8 *src);
asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst,
const u8 *src,
u8 *keystream, u8 *iv);
static struct aria_avx_ops aria_ops;
static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_encrypt_16way);
ECB_BLOCK(1, aria_encrypt);
ECB_WALK_END();
}
static int ecb_do_decrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_decrypt_16way);
ECB_BLOCK(1, aria_decrypt);
ECB_WALK_END();
}
static int aria_avx_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_encrypt(req, ctx->enc_key[0]);
}
static int aria_avx_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
return ecb_do_decrypt(req, ctx->dec_key[0]);
}
static int aria_avx_set_key(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
return aria_set_key(&tfm->base, key, keylen);
}
static int aria_avx_ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
kernel_fpu_begin();
aria_ops.aria_ctr_crypt_16way(ctx, dst, src, keystream,
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
src += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE;
}
while (nbytes >= ARIA_BLOCK_SIZE) {
u8 keystream[ARIA_BLOCK_SIZE];
memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, keystream, keystream);
crypto_xor_cpy(dst, src, keystream, ARIA_BLOCK_SIZE);
dst += ARIA_BLOCK_SIZE;
src += ARIA_BLOCK_SIZE;
nbytes -= ARIA_BLOCK_SIZE;
}
if (walk.nbytes == walk.total && nbytes > 0) {
u8 keystream[ARIA_BLOCK_SIZE];
memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
aria_encrypt(ctx, keystream, keystream);
crypto_xor_cpy(dst, src, keystream, nbytes);
dst += nbytes;
src += nbytes;
nbytes = 0;
}
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static struct skcipher_alg aria_algs[] = {
{
.base.cra_name = "__ecb(aria)",
.base.cra_driver_name = "__ecb-aria-avx",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.setkey = aria_avx_set_key,
.encrypt = aria_avx_ecb_encrypt,
.decrypt = aria_avx_ecb_decrypt,
}, {
.base.cra_name = "__ctr(aria)",
.base.cra_driver_name = "__ctr-aria-avx",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARIA_MIN_KEY_SIZE,
.max_keysize = ARIA_MAX_KEY_SIZE,
.ivsize = ARIA_BLOCK_SIZE,
.chunksize = ARIA_BLOCK_SIZE,
.walksize = 16 * ARIA_BLOCK_SIZE,
.setkey = aria_avx_set_key,
.encrypt = aria_avx_ctr_encrypt,
.decrypt = aria_avx_ctr_encrypt,
}
};
static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
static int __init aria_avx_init(void)
{
const char *feature_name;
if (!boot_cpu_has(X86_FEATURE_AVX) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
pr_info("AVX or AES-NI instructions are not detected.\n");
return -ENODEV;
}
if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
&feature_name)) {
pr_info("CPU feature '%s' is not supported.\n", feature_name);
return -ENODEV;
}
if (boot_cpu_has(X86_FEATURE_GFNI)) {
aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
} else {
aria_ops.aria_encrypt_16way = aria_aesni_avx_encrypt_16way;
aria_ops.aria_decrypt_16way = aria_aesni_avx_decrypt_16way;
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way;
}
return simd_register_skciphers_compat(aria_algs,
ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
static void __exit aria_avx_exit(void)
{
simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
aria_simd_algs);
}
module_init(aria_avx_init);
module_exit(aria_avx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
MODULE_DESCRIPTION("ARIA Cipher Algorithm, AVX/AES-NI/GFNI optimized");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-aesni-avx");
......@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
......@@ -284,6 +285,13 @@ static int register_sha512_avx2(void)
ARRAY_SIZE(sha512_avx2_algs));
return 0;
}
static const struct x86_cpu_id module_cpu_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static void unregister_sha512_avx2(void)
{
......@@ -294,6 +302,8 @@ static void unregister_sha512_avx2(void)
static int __init sha512_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
if (register_sha512_ssse3())
goto fail;
......
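The pattern above is worth spelling out: the x86_cpu_id table plus MODULE_DEVICE_TABLE(x86cpu, ...) emits CPU-feature modaliases so udev autoloads the module only on CPUs advertising SSSE3/AVX/AVX2, while the x86_match_cpu() gate makes a manual insmod on an unsupported CPU fail cleanly with -ENODEV. A stripped-down sketch of the same idiom for a hypothetical module:

#include <asm/cpu_device_id.h>
#include <linux/module.h>

static const struct x86_cpu_id foo_cpu_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, foo_cpu_ids);

static int __init foo_mod_init(void)
{
	/* refuse a manual insmod on CPUs without the required feature */
	if (!x86_match_cpu(foo_cpu_ids))
		return -ENODEV;
	return 0;
}
module_init(foo_mod_init);

MODULE_LICENSE("GPL");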
......@@ -149,7 +149,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria.o
obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
......
......@@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req)
return -ENOSYS;
}
static int akcipher_default_set_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
......@@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
if (!alg->set_priv_key)
alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
......
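With the default ->set_priv_key stub in place, an akcipher that only implements public-key operations no longer has to supply its own -ENOSYS handlers. A hypothetical, illustrative registration relying on the new defaults (all names and bodies are made up, not a real driver):

#include <crypto/internal/akcipher.h>
#include <linux/module.h>

static int toy_set_pub_key(struct crypto_akcipher *tfm,
			   const void *key, unsigned int keylen)
{
	return 0;	/* parse and store the public key here */
}

static int toy_encrypt(struct akcipher_request *req)
{
	return -EINVAL;	/* perform the public-key operation here */
}

static unsigned int toy_max_size(struct crypto_akcipher *tfm)
{
	return 256;
}

static struct akcipher_alg toy_alg = {
	.encrypt     = toy_encrypt,
	.set_pub_key = toy_set_pub_key,
	.max_size    = toy_max_size,
	/* no .set_priv_key: the core now substitutes the -ENOSYS stub */
	.base = {
		.cra_name        = "toy",
		.cra_driver_name = "toy-generic",
		.cra_priority    = 100,
		.cra_module      = THIS_MODULE,
	},
};

static int __init toy_init(void)
{
	return crypto_register_akcipher(&toy_alg);
}

static void __exit toy_exit(void)
{
	crypto_unregister_akcipher(&toy_alg);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");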
......@@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_inc);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
int relalign = 0;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
int size = sizeof(unsigned long);
int d = (((unsigned long)dst ^ (unsigned long)src1) |
((unsigned long)dst ^ (unsigned long)src2)) &
(size - 1);
relalign = d ? 1 << __ffs(d) : size;
/*
* If we care about alignment, process as many bytes as
* needed to advance dst and src to values whose alignments
* equal their relative alignment. This will allow us to
* process the remainder of the input using optimal strides.
*/
while (((unsigned long)dst & (relalign - 1)) && len > 0) {
*dst++ = *src1++ ^ *src2++;
len--;
}
}
while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u64 l = get_unaligned((u64 *)src1) ^
get_unaligned((u64 *)src2);
put_unaligned(l, (u64 *)dst);
} else {
*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
}
dst += 8;
src1 += 8;
src2 += 8;
len -= 8;
}
while (len >= 4 && !(relalign & 3)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u32 l = get_unaligned((u32 *)src1) ^
get_unaligned((u32 *)src2);
put_unaligned(l, (u32 *)dst);
} else {
*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
}
dst += 4;
src1 += 4;
src2 += 4;
len -= 4;
}
while (len >= 2 && !(relalign & 1)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u16 l = get_unaligned((u16 *)src1) ^
get_unaligned((u16 *)src2);
put_unaligned(l, (u16 *)dst);
} else {
*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
}
dst += 2;
src1 += 2;
src2 += 2;
len -= 2;
}
while (len--)
*dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);
unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize +
......
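__crypto_xor() itself only moves out of crypto/algapi.c here (the "Create lib/utils module" item in the pull summary); callers keep using the crypto_xor()/crypto_xor_cpy() wrappers from <crypto/algapi.h>, which accept arbitrary lengths and handle alignment, as in the ARIA CTR path above. A minimal sketch of both call shapes:

#include <crypto/algapi.h>
#include <linux/types.h>

/* dst = src ^ keystream, as in the CTR tail handling above */
static void ctr_apply(u8 *dst, const u8 *src, const u8 *keystream,
		      unsigned int len)
{
	crypto_xor_cpy(dst, src, keystream, len);
}

/* block ^= iv, the classic CBC chaining step */
static void cbc_chain(u8 *block, const u8 *iv, unsigned int bs)
{
	crypto_xor(block, iv, bs);
}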
......@@ -114,7 +114,7 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
larval->alg.cra_priority = -1;
larval->alg.cra_destroy = crypto_larval_destroy;
strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
init_completion(&larval->completion);
return larval;
......@@ -321,7 +321,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
/*
* If the internal flag is set for a cipher, require a caller to
* to invoke the cipher with the internal flag to use that cipher.
* invoke the cipher with the internal flag to use that cipher.
* Also, if a caller wants to allocate a cipher that may or may
* not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
* !(mask & CRYPTO_ALG_INTERNAL).
......
......@@ -16,6 +16,14 @@
#include <crypto/aria.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0,
0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e,
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0
};
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
......@@ -25,7 +33,7 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
const u32 *ck;
int rkidx = 0;
ck = &key_rc[(key_len - 16) / 8][0];
ck = &key_rc[(key_len - 16) / 2];
w0[0] = be32_to_cpu(key[0]);
w0[1] = be32_to_cpu(key[1]);
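The index rewrite above is sound because the old two-dimensional table stored four u32 round constants per row: row (key_len - 16) / 8 of the old layout begins at flat element 4 * ((key_len - 16) / 8) = (key_len - 16) / 2 of the new one. A standalone check of that equivalence for the three ARIA key sizes:

#include <assert.h>

int main(void)
{
	unsigned int key_len;

	/* ARIA-128/192/256 use key_len = 16, 24 and 32 bytes */
	for (key_len = 16; key_len <= 32; key_len += 8)
		assert(4 * ((key_len - 16) / 8) == (key_len - 16) / 2);
	return 0;
}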
......@@ -163,8 +171,7 @@ static void aria_set_decrypt_key(struct aria_ctx *ctx)
}
}
static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
......@@ -179,6 +186,7 @@ static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
......@@ -235,14 +243,30 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
dst[3] = cpu_to_be32(reg3);
}
static void aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
__aria_crypt(ctx, out, in, ctx->enc_key);
}
EXPORT_SYMBOL_GPL(aria_encrypt);
void aria_decrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
__aria_crypt(ctx, out, in, ctx->dec_key);
}
EXPORT_SYMBOL_GPL(aria_decrypt);
static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
__aria_crypt(ctx, out, in, ctx->enc_key);
}
static void aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
......@@ -263,8 +287,8 @@ static struct crypto_alg aria_alg = {
.cia_min_keysize = ARIA_MIN_KEY_SIZE,
.cia_max_keysize = ARIA_MAX_KEY_SIZE,
.cia_setkey = aria_set_key,
.cia_encrypt = aria_encrypt,
.cia_decrypt = aria_decrypt
.cia_encrypt = __aria_encrypt,
.cia_decrypt = __aria_decrypt
}
}
};
......@@ -286,3 +310,4 @@ MODULE_DESCRIPTION("ARIA Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-generic");
......@@ -189,7 +189,7 @@ static int test(int disks, int *tests)
}
static int raid6_test(void)
static int __init raid6_test(void)
{
int err = 0;
int tests = 0;
......@@ -236,7 +236,7 @@ static int raid6_test(void)
return 0;
}
static void raid6_test_exit(void)
static void __exit raid6_test_exit(void)
{
}
......
......@@ -72,12 +72,12 @@ static struct kpp_alg curve25519_alg = {
.max_size = curve25519_max_size,
};
static int curve25519_init(void)
static int __init curve25519_init(void)
{
return crypto_register_kpp(&curve25519_alg);
}
static void curve25519_exit(void)
static void __exit curve25519_exit(void)
{
crypto_unregister_kpp(&curve25519_alg);
}
......
......@@ -893,7 +893,7 @@ static struct crypto_template crypto_ffdhe_templates[] = {};
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
static int dh_init(void)
static int __init dh_init(void)
{
int err;
......@@ -911,7 +911,7 @@ static int dh_init(void)
return 0;
}
static void dh_exit(void)
static void __exit dh_exit(void)
{
crypto_unregister_templates(crypto_ffdhe_templates,
ARRAY_SIZE(crypto_ffdhe_templates));
......
......@@ -1703,7 +1703,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
{
struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
struct sdesc *sdesc = drbg->priv_data;
if (sdesc) {
crypto_free_shash(sdesc->shash.tfm);
kfree_sensitive(sdesc);
......@@ -1715,7 +1715,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg)
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
struct sdesc *sdesc = drbg->priv_data;
crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
}
......@@ -1723,7 +1723,7 @@ static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in)
{
struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
struct sdesc *sdesc = drbg->priv_data;
struct drbg_string *input = NULL;
crypto_shash_init(&sdesc->shash);
......@@ -1818,8 +1818,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
struct crypto_cipher *tfm =
(struct crypto_cipher *)drbg->priv_data;
struct crypto_cipher *tfm = drbg->priv_data;
crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
}
......@@ -1827,8 +1826,7 @@ static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in)
{
struct crypto_cipher *tfm =
(struct crypto_cipher *)drbg->priv_data;
struct crypto_cipher *tfm = drbg->priv_data;
/* there is only one component in *in */
BUG_ON(in->len < drbg_blocklen(drbg));
......
......@@ -200,7 +200,7 @@ static struct kpp_alg ecdh_nist_p384 = {
static bool ecdh_nist_p192_registered;
static int ecdh_init(void)
static int __init ecdh_init(void)
{
int ret;
......@@ -227,7 +227,7 @@ static int ecdh_init(void)
return ret;
}
static void ecdh_exit(void)
static void __exit ecdh_exit(void)
{
if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192);
......
......@@ -332,7 +332,7 @@ static struct akcipher_alg ecdsa_nist_p192 = {
};
static bool ecdsa_nist_p192_registered;
static int ecdsa_init(void)
static int __init ecdsa_init(void)
{
int ret;
......@@ -359,7 +359,7 @@ static int ecdsa_init(void)
return ret;
}
static void ecdsa_exit(void)
static void __exit ecdsa_exit(void)
{
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
......
......@@ -543,7 +543,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
}
/* record the driver name so we can instantiate this exact algo later */
strlcpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
CRYPTO_MAX_ALG_NAME);
/* Instance fields */
......
......@@ -327,7 +327,7 @@ static struct akcipher_alg rsa = {
},
};
static int rsa_init(void)
static int __init rsa_init(void)
{
int err;
......@@ -344,7 +344,7 @@ static int rsa_init(void)
return 0;
}
static void rsa_exit(void)
static void __exit rsa_exit(void)
{
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
......
......@@ -441,12 +441,12 @@ static struct akcipher_alg sm2 = {
},
};
static int sm2_init(void)
static int __init sm2_init(void)
{
return crypto_register_akcipher(&sm2);
}
static void sm2_exit(void)
static void __exit sm2_exit(void)
{
crypto_unregister_akcipher(&sm2);
}
......
......@@ -66,17 +66,6 @@ static u32 num_mb = 8;
static unsigned int klen;
static char *tvmem[TVMEMSIZE];
static const char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt",
"camellia", "seed", "rmd160", "aria",
"lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384",
"sha3-512", "streebog256", "streebog512",
NULL
};
static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
......@@ -1454,18 +1443,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
false);
}
static void test_available(void)
{
const char **name = check;
while (*name) {
printk("alg %s ", *name);
printk(crypto_has_alg(*name, 0, 0) ?
"found\n" : "not found\n");
name++;
}
}
static inline int tcrypt_test(const char *alg)
{
int ret;
......@@ -2228,6 +2205,13 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 229:
test_mb_aead_speed("gcm(aria)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
test_mb_aead_speed("gcm(aria)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
break;
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
......@@ -2648,6 +2632,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16);
break;
case 519:
test_acipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 600:
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
......@@ -2860,9 +2855,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_8_32, num_mb);
break;
case 1000:
test_available();
case 610:
test_mb_skcipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
break;
}
return ret;
......
......@@ -3322,7 +3322,7 @@ static int test_comp(struct crypto_comp *tfm,
}
static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
int ctcount, int dtcount)
{
......@@ -3417,6 +3417,21 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
acomp_request_set_params(req, &src, NULL, ilen, 0);
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#endif
kfree(input_vec);
acomp_request_free(req);
}
......@@ -3478,6 +3493,20 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
crypto_init_wait(&wait);
acomp_request_set_params(req, &src, NULL, ilen, 0);
ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#endif
kfree(input_vec);
acomp_request_free(req);
}
......@@ -5801,8 +5830,11 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
driver, alg,
fips_enabled ? "fips" : "panic_on_fail");
}
WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)",
driver, alg, rc);
pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
WARN(rc != -ENOENT,
"alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
} else {
if (fips_enabled)
pr_info("alg: self-tests for %s (%s) passed\n",
......
......@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
if ((int)res.a0 < 0)
return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
......@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
default:
return -EIO;
}
}
......
......@@ -52,7 +52,7 @@ MODULE_PARM_DESC(default_quality,
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void hwrng_manage_rngd(struct hwrng *rng);
static int hwrng_fillfn(void *unused);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
......@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
drop_current_rng();
current_rng = rng;
/* if necessary, start hwrng thread */
if (!hwrng_fill) {
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
if (IS_ERR(hwrng_fill)) {
pr_err("hwrng_fill thread creation failed\n");
hwrng_fill = NULL;
}
}
return 0;
}
......@@ -167,8 +176,6 @@ static int hwrng_init(struct hwrng *rng)
rng->quality = 1024;
current_quality = rng->quality; /* obsolete */
hwrng_manage_rngd(rng);
return 0;
}
......@@ -454,10 +461,6 @@ static ssize_t rng_quality_store(struct device *dev,
/* the best available RNG may have changed */
ret = enable_best_rng();
/* start/stop rngd if necessary */
if (current_rng)
hwrng_manage_rngd(current_rng);
out:
mutex_unlock(&rng_mutex);
return ret ? ret : len;
......@@ -507,16 +510,14 @@ static int hwrng_fillfn(void *unused)
rng->quality = current_quality; /* obsolete */
quality = rng->quality;
mutex_unlock(&reading_mutex);
put_rng(rng);
if (!quality)
break;
if (rc <= 0)
hwrng_msleep(rng, 10000);
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
put_rng(rng);
if (rc <= 0)
continue;
}
/* If we cannot credit at least one bit of entropy,
* keep track of the remainder for the next iteration
......@@ -533,22 +534,6 @@ static int hwrng_fillfn(void *unused)
return 0;
}
static void hwrng_manage_rngd(struct hwrng *rng)
{
if (WARN_ON(!mutex_is_locked(&rng_mutex)))
return;
if (rng->quality == 0 && hwrng_fill)
kthread_stop(hwrng_fill);
if (rng->quality > 0 && !hwrng_fill) {
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
if (IS_ERR(hwrng_fill)) {
pr_err("hwrng_fill thread creation failed\n");
hwrng_fill = NULL;
}
}
}
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
......@@ -570,6 +555,7 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
init_completion(&rng->dying);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
......@@ -617,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
old_rng = current_rng;
list_del(&rng->list);
complete_all(&rng->dying);
if (current_rng == rng) {
err = enable_best_rng();
if (err) {
......@@ -685,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
unsigned long timeout = msecs_to_jiffies(msecs) + 1;
return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);
static int __init hwrng_modinit(void)
{
int ret;
......
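The new hwrng_msleep() waits on the rng->dying completion (initialized in hwrng_register() and completed in hwrng_unregister() above), so a driver's back-off delay ends early when the device is torn down; this is the "more interruptible" sleeping from the pull summary. A hypothetical driver read path using it (foo_ready() and the timeout are made up):

#include <linux/hw_random.h>

static bool foo_ready(void);	/* hypothetical: poll a hardware FIFO flag */

static int foo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	while (!foo_ready()) {
		if (!wait)
			return 0;
		/*
		 * Nonzero means the sleep ended early: the RNG is being
		 * unregistered (rng->dying completed) or the wait was
		 * interrupted, so stop polling instead of blocking teardown.
		 */
		if (hwrng_msleep(rng, 100))
			return -ENODEV;
	}
	/* ... read up to max bytes into data, return the byte count ... */
	return 0;
}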
......@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
rngc->clk = devm_clk_get(&pdev->dev, NULL);
rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
......@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
ret = clk_prepare_enable(rngc->clk);
if (ret)
return ret;
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
ret = -ENODEV;
goto err;
}
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
dev_err(rngc->dev, "Can't get interrupt working.\n");
goto err;
}
if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
return -ENODEV;
init_completion(&rngc->rng_op_done);
......@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
dev_err(rngc->dev, "Can't get interrupt working.\n");
return ret;
}
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
goto err;
return ret;
}
}
ret = hwrng_register(&rngc->rng);
ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
goto err;
return ret;
}
dev_info(&pdev->dev,
......@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
err:
clk_disable_unprepare(rngc->clk);
return ret;
}
static int __exit imx_rngc_remove(struct platform_device *pdev)
{
struct imx_rngc *rngc = platform_get_drvdata(pdev);
hwrng_unregister(&rngc->rng);
clk_disable_unprepare(rngc->clk);
return 0;
}
static int __maybe_unused imx_rngc_suspend(struct device *dev)
......@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = "imx_rngc",
.name = KBUILD_MODNAME,
.pm = &imx_rngc_pm_ops,
.of_match_table = imx_rngc_dt_ids,
},
.remove = __exit_p(imx_rngc_remove),
};
module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
......
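The imx-rngc conversion leans entirely on devres: devm_clk_get_enabled() hands back an already-enabled clock whose disable/unprepare is queued for device detach, and devm_hwrng_register() queues the unregistration the same way, which is why the err: unwind label and the remove() callback could be deleted. A condensed sketch of the pattern (the foo_* names are illustrative):

#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

static int foo_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	return 0;	/* stub: fill data from the hardware here */
}

static struct hwrng foo_rng = {
	.name = "foo",
	.read = foo_read,
};

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* clock is enabled now; disable/unprepare runs automatically on detach */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* unregistration is queued the same way: no err label, no remove() */
	return devm_hwrng_register(&pdev->dev, &foo_rng);
}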
......@@ -802,9 +802,7 @@ source "drivers/crypto/amlogic/Kconfig"
config CRYPTO_DEV_SA2UL
tristate "Support for TI security accelerator"
depends on ARCH_K3 || COMPILE_TEST
select ARM64_CRYPTO
select CRYPTO_AES
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_SHA1
......@@ -818,5 +816,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
......
......@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
{
unsigned int i;
......@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
}
static const struct file_operations sun4i_ss_debugfs_fops = {
.owner = THIS_MODULE,
.open = sun4i_ss_dbgfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
/*
* Power management strategy: The device is suspended unless a TFM exists for
......
......@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
goto err_dst;
}
err = pm_runtime_get_sync(ce->dev);
if (err < 0) {
pm_runtime_put_noidle(ce->dev);
err = pm_runtime_resume_and_get(ce->dev);
if (err < 0)
goto err_pm;
}
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
......
......@@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq)
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
if (nr_sgs < 0) {
if (!nr_sgs) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
......@@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq)
} else {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
if (!nr_sgs || nr_sgs > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
if (!nr_sgd || nr_sgd > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
err = -EINVAL;
goto theend;
......
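The amlogic fix above corrects a common mistake: dma_map_sg() returns the number of mapped segments and 0 on failure; it never returns a negative errno, so the old nr_sgs < 0 tests were dead code. The correct check, as a minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int nr_sgs = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!nr_sgs)	/* 0 on mapping failure, never a negative errno */
		return -EINVAL;

	/* ... program the engine with nr_sgs descriptors ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}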
config CRYPTO_DEV_ASPEED
	tristate "Support for Aspeed cryptographic engine driver"
	depends on ARCH_ASPEED || COMPILE_TEST
	select CRYPTO_ENGINE
	help
	  Hash and Crypto Engine (HACE) is designed to accelerate the
	  throughput of hash data digest, encryption and decryption.

	  Select y here to have support for the cryptographic driver
	  available on Aspeed SoC.

config CRYPTO_DEV_ASPEED_DEBUG
	bool "Enable Aspeed crypto debug messages"
	depends on CRYPTO_DEV_ASPEED
	help
	  Print Aspeed crypto debugging messages if you use this option
	  to ask for those messages.
	  Avoid enabling this option in production builds to minimize
	  the driver's timing overhead.

config CRYPTO_DEV_ASPEED_HACE_HASH
	bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
	depends on CRYPTO_DEV_ASPEED
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	select CRYPTO_SHA512
	select CRYPTO_HMAC
	help
	  Select here to enable Aspeed Hash & Crypto Engine (HACE)
	  hash driver.
	  Supports multiple message digest standards, including
	  SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on.

config CRYPTO_DEV_ASPEED_HACE_CRYPTO
	bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
	depends on CRYPTO_DEV_ASPEED
	select CRYPTO_AES
	select CRYPTO_DES
	select CRYPTO_ECB
	select CRYPTO_CBC
	select CRYPTO_CFB
	select CRYPTO_OFB
	select CRYPTO_CTR
	help
	  Select here to enable Aspeed Hash & Crypto Engine (HACE)
	  crypto driver.
	  Supports AES/DES symmetric-key encryption and decryption
	  with ECB/CBC/CFB/OFB/CTR options.
hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
aspeed_crypto-objs := aspeed-hace.o \
$(hace-hash-y) \
$(hace-crypto-y)
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2021 Aspeed Technology Inc.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "aspeed-hace.h"
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define HACE_DBG(d, fmt, ...) \
dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define HACE_DBG(d, fmt, ...) \
dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif
/* HACE interrupt service routine */
static irqreturn_t aspeed_hace_irq(int irq, void *dev)
{
struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
u32 sts;
sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);
HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);
if (sts & HACE_HASH_ISR) {
if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
tasklet_schedule(&hash_engine->done_task);
else
dev_warn(hace_dev->dev, "HASH no active requests.\n");
}
if (sts & HACE_CRYPTO_ISR) {
if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
tasklet_schedule(&crypto_engine->done_task);
else
dev_warn(hace_dev->dev, "CRYPTO no active requests.\n");
}
return IRQ_HANDLED;
}
static void aspeed_hace_crypto_done_task(unsigned long data)
{
struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
crypto_engine->resume(hace_dev);
}
static void aspeed_hace_hash_done_task(unsigned long data)
{
struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
hash_engine->resume(hace_dev);
}
static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
{
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
aspeed_register_hace_hash_algs(hace_dev);
#endif
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
aspeed_register_hace_crypto_algs(hace_dev);
#endif
}
static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
{
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
aspeed_unregister_hace_hash_algs(hace_dev);
#endif
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
aspeed_unregister_hace_crypto_algs(hace_dev);
#endif
}
static const struct of_device_id aspeed_hace_of_matches[] = {
{ .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
{ .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
{},
};
static int aspeed_hace_probe(struct platform_device *pdev)
{
struct aspeed_engine_crypto *crypto_engine;
const struct of_device_id *hace_dev_id;
struct aspeed_engine_hash *hash_engine;
struct aspeed_hace_dev *hace_dev;
struct resource *res;
int rc;
hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
GFP_KERNEL);
if (!hace_dev)
return -ENOMEM;
hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
if (!hace_dev_id) {
dev_err(&pdev->dev, "Failed to match hace dev id\n");
return -EINVAL;
}
hace_dev->dev = &pdev->dev;
hace_dev->version = (unsigned long)hace_dev_id->data;
hash_engine = &hace_dev->hash_engine;
crypto_engine = &hace_dev->crypto_engine;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
platform_set_drvdata(pdev, hace_dev);
hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hace_dev->regs))
return PTR_ERR(hace_dev->regs);
/* Get irq number and register it */
hace_dev->irq = platform_get_irq(pdev, 0);
if (hace_dev->irq < 0)
return -ENXIO;
rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
dev_name(&pdev->dev), hace_dev);
if (rc) {
dev_err(&pdev->dev, "Failed to request interrupt\n");
return rc;
}
/* Get clk and enable it */
hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hace_dev->clk)) {
dev_err(&pdev->dev, "Failed to get clk\n");
return -ENODEV;
}
rc = clk_prepare_enable(hace_dev->clk);
if (rc) {
dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc);
return rc;
}
/* Initialize crypto hardware engine structure for hash */
hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
true);
if (!hace_dev->crypt_engine_hash) {
rc = -ENOMEM;
goto clk_exit;
}
rc = crypto_engine_start(hace_dev->crypt_engine_hash);
if (rc)
goto err_engine_hash_start;
tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
(unsigned long)hace_dev);
/* Initialize crypto hardware engine structure for crypto */
hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
true);
if (!hace_dev->crypt_engine_crypto) {
rc = -ENOMEM;
goto err_engine_hash_start;
}
rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
if (rc)
goto err_engine_crypto_start;
tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
(unsigned long)hace_dev);
/* Allocate DMA buffer for hash engine input used */
hash_engine->ahash_src_addr =
dmam_alloc_coherent(&pdev->dev,
ASPEED_HASH_SRC_DMA_BUF_LEN,
&hash_engine->ahash_src_dma_addr,
GFP_KERNEL);
if (!hash_engine->ahash_src_addr) {
dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
rc = -ENOMEM;
goto err_engine_crypto_start;
}
/* Allocate DMA buffer for crypto engine context used */
crypto_engine->cipher_ctx =
dmam_alloc_coherent(&pdev->dev,
PAGE_SIZE,
&crypto_engine->cipher_ctx_dma,
GFP_KERNEL);
if (!crypto_engine->cipher_ctx) {
dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
rc = -ENOMEM;
goto err_engine_crypto_start;
}
/* Allocate DMA buffer for crypto engine input used */
crypto_engine->cipher_addr =
dmam_alloc_coherent(&pdev->dev,
ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
&crypto_engine->cipher_dma_addr,
GFP_KERNEL);
if (!crypto_engine->cipher_addr) {
dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
rc = -ENOMEM;
goto err_engine_crypto_start;
}
/* Allocate DMA buffer for crypto engine output used */
if (hace_dev->version == AST2600_VERSION) {
crypto_engine->dst_sg_addr =
dmam_alloc_coherent(&pdev->dev,
ASPEED_CRYPTO_DST_DMA_BUF_LEN,
&crypto_engine->dst_sg_dma_addr,
GFP_KERNEL);
if (!crypto_engine->dst_sg_addr) {
dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
rc = -ENOMEM;
goto err_engine_crypto_start;
}
}
aspeed_hace_register(hace_dev);
dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
return 0;
err_engine_crypto_start:
crypto_engine_exit(hace_dev->crypt_engine_crypto);
err_engine_hash_start:
crypto_engine_exit(hace_dev->crypt_engine_hash);
clk_exit:
clk_disable_unprepare(hace_dev->clk);
return rc;
}
static int aspeed_hace_remove(struct platform_device *pdev)
{
struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
aspeed_hace_unregister(hace_dev);
crypto_engine_exit(hace_dev->crypt_engine_hash);
crypto_engine_exit(hace_dev->crypt_engine_crypto);
tasklet_kill(&hash_engine->done_task);
tasklet_kill(&crypto_engine->done_task);
clk_disable_unprepare(hace_dev->clk);
return 0;
}
MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
static struct platform_driver aspeed_hace_driver = {
.probe = aspeed_hace_probe,
.remove = aspeed_hace_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_hace_of_matches,
},
};
module_platform_driver(aspeed_hace_driver);
MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_DESCRIPTION("Aspeed HACE driver Crypto Accelerator");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __ASPEED_HACE_H__
#define __ASPEED_HACE_H__
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/dma-mapping.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/des.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
/*****************************
 *                           *
 * HACE register definitions *
 *                           *
 *****************************/
#define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */
#define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */
#define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */
#define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */
#define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */
/* G5 */
#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */
/* G6 */
#define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */
#define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */
#define ASPEED_HACE_STS 0x1C /* HACE Status Register */
#define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */
#define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */
#define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */
#define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */
#define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */
/* crypto cmd */
#define HACE_CMD_SINGLE_DES 0
#define HACE_CMD_TRIPLE_DES BIT(17)
#define HACE_CMD_AES_SELECT 0
#define HACE_CMD_DES_SELECT BIT(16)
#define HACE_CMD_ISR_EN BIT(12)
#define HACE_CMD_CONTEXT_SAVE_ENABLE (0)
#define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9)
#define HACE_CMD_AES (0)
#define HACE_CMD_DES (0)
#define HACE_CMD_RC4 BIT(8)
#define HACE_CMD_DECRYPT (0)
#define HACE_CMD_ENCRYPT BIT(7)
#define HACE_CMD_ECB (0x0 << 4)
#define HACE_CMD_CBC (0x1 << 4)
#define HACE_CMD_CFB (0x2 << 4)
#define HACE_CMD_OFB (0x3 << 4)
#define HACE_CMD_CTR (0x4 << 4)
#define HACE_CMD_OP_MODE_MASK (0x7 << 4)
#define HACE_CMD_AES128 (0x0 << 2)
#define HACE_CMD_AES192 (0x1 << 2)
#define HACE_CMD_AES256 (0x2 << 2)
#define HACE_CMD_OP_CASCADE (0x3)
#define HACE_CMD_OP_INDEPENDENT (0x1)
/* G5 */
#define HACE_CMD_RI_WO_DATA_ENABLE (0)
#define HACE_CMD_RI_WO_DATA_DISABLE BIT(11)
#define HACE_CMD_CONTEXT_LOAD_ENABLE (0)
#define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10)
/* G6 */
#define HACE_CMD_AES_KEY_FROM_OTP BIT(24)
#define HACE_CMD_GHASH_TAG_XOR_EN BIT(23)
#define HACE_CMD_GHASH_PAD_LEN_INV BIT(22)
#define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21)
#define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20)
#define HACE_CMD_DES_SG_CTRL BIT(19)
#define HACE_CMD_SRC_SG_CTRL BIT(18)
#define HACE_CMD_CTR_IV_AES_96 (0x1 << 14)
#define HACE_CMD_CTR_IV_DES_32 (0x1 << 14)
#define HACE_CMD_CTR_IV_AES_64 (0x2 << 14)
#define HACE_CMD_CTR_IV_AES_32 (0x3 << 14)
#define HACE_CMD_AES_KEY_HW_EXP BIT(13)
#define HACE_CMD_GCM (0x5 << 4)
/* interrupt status reg */
#define HACE_CRYPTO_ISR BIT(12)
#define HACE_HASH_ISR BIT(9)
#define HACE_HASH_BUSY BIT(0)
/* hash cmd reg */
#define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20)
#define HASH_CMD_HASH_SRC_SG_CTRL BIT(18)
#define HASH_CMD_SHA512_224 (0x3 << 10)
#define HASH_CMD_SHA512_256 (0x2 << 10)
#define HASH_CMD_SHA384 (0x1 << 10)
#define HASH_CMD_SHA512 (0)
#define HASH_CMD_INT_ENABLE BIT(9)
#define HASH_CMD_HMAC (0x1 << 7)
#define HASH_CMD_ACC_MODE (0x2 << 7)
#define HASH_CMD_HMAC_KEY (0x3 << 7)
#define HASH_CMD_SHA1 (0x2 << 4)
#define HASH_CMD_SHA224 (0x4 << 4)
#define HASH_CMD_SHA256 (0x5 << 4)
#define HASH_CMD_SHA512_SER (0x6 << 4)
#define HASH_CMD_SHA_SWAP (0x2 << 2)
#define HASH_SG_LAST_LIST BIT(31)
#define CRYPTO_FLAGS_BUSY BIT(1)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
#define SHA_FLAGS_SHA1 BIT(0)
#define SHA_FLAGS_SHA224 BIT(1)
#define SHA_FLAGS_SHA256 BIT(2)
#define SHA_FLAGS_SHA384 BIT(3)
#define SHA_FLAGS_SHA512 BIT(4)
#define SHA_FLAGS_SHA512_224 BIT(5)
#define SHA_FLAGS_SHA512_256 BIT(6)
#define SHA_FLAGS_HMAC BIT(8)
#define SHA_FLAGS_FINUP BIT(9)
#define SHA_FLAGS_MASK (0xff)
#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000
#define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000
#define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0
#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000
#define ASPEED_HASH_QUEUE_LENGTH 50
#define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \
HACE_CMD_OFB | HACE_CMD_CTR)
struct aspeed_hace_dev;
typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
struct aspeed_sg_list {
__le32 len;
__le32 phy_addr;
};
struct aspeed_engine_hash {
struct tasklet_struct done_task;
unsigned long flags;
struct ahash_request *req;
/* input buffer */
void *ahash_src_addr;
dma_addr_t ahash_src_dma_addr;
dma_addr_t src_dma;
dma_addr_t digest_dma;
size_t src_length;
/* callback func */
aspeed_hace_fn_t resume;
aspeed_hace_fn_t dma_prepare;
};
struct aspeed_sha_hmac_ctx {
struct crypto_shash *shash;
u8 ipad[SHA512_BLOCK_SIZE];
u8 opad[SHA512_BLOCK_SIZE];
};
struct aspeed_sham_ctx {
struct crypto_engine_ctx enginectx;
struct aspeed_hace_dev *hace_dev;
unsigned long flags; /* hmac flag */
struct aspeed_sha_hmac_ctx base[];	/* flexible array member */
};
struct aspeed_sham_reqctx {
unsigned long flags; /* final/update flag, should not be reused */
unsigned long op; /* final or update */
u32 cmd; /* trigger cmd */
/* walk state */
struct scatterlist *src_sg;
int src_nents;
unsigned int offset; /* offset in current sg */
unsigned int total; /* per update length */
size_t digsize;
size_t block_size;
size_t ivsize;
const __be32 *sha_iv;
/* remain data buffer */
u8 buffer[SHA512_BLOCK_SIZE * 2];
dma_addr_t buffer_dma_addr;
size_t bufcnt; /* buffer counter */
/* output buffer */
u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
dma_addr_t digest_dma_addr;
u64 digcnt[2];
};
struct aspeed_engine_crypto {
struct tasklet_struct done_task;
unsigned long flags;
struct skcipher_request *req;
/* context buffer */
void *cipher_ctx;
dma_addr_t cipher_ctx_dma;
/* input buffer, may be a single buffer or a scatter-gather list */
void *cipher_addr;
dma_addr_t cipher_dma_addr;
/* output buffer, used only with scatter-gather lists */
void *dst_sg_addr;
dma_addr_t dst_sg_dma_addr;
/* callback func */
aspeed_hace_fn_t resume;
};
struct aspeed_cipher_ctx {
struct crypto_engine_ctx enginectx;
struct aspeed_hace_dev *hace_dev;
int key_len;
u8 key[AES_MAX_KEYLENGTH];
/* callback func */
aspeed_hace_fn_t start;
struct crypto_skcipher *fallback_tfm;
};
struct aspeed_cipher_reqctx {
int enc_cmd;
int src_nents;
int dst_nents;
struct skcipher_request fallback_req; /* keep at the end */
};
struct aspeed_hace_dev {
void __iomem *regs;
struct device *dev;
int irq;
struct clk *clk;
unsigned long version;
struct crypto_engine *crypt_engine_hash;
struct crypto_engine *crypt_engine_crypto;
struct aspeed_engine_hash hash_engine;
struct aspeed_engine_crypto crypto_engine;
};
struct aspeed_hace_alg {
struct aspeed_hace_dev *hace_dev;
const char *alg_base;
union {
struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
};
enum aspeed_version {
AST2500_VERSION = 5,
AST2600_VERSION
};
#define ast_hace_write(hace, val, offset) \
writel((val), (hace)->regs + (offset))
#define ast_hace_read(hace, offset) \
readl((hace)->regs + (offset))
void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
#endif
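
The ast_hace_write()/ast_hace_read() accessors above combine with the command bit definitions to program one operation: source, destination and length registers first, then the command register to start the engine. A hedged illustration follows; example_kick_aes_cbc is an assumed helper and the exact flag combination is illustrative, not taken from the driver:

/* Illustrative only: compose a command word from the defines above. */
static void example_kick_aes_cbc(struct aspeed_hace_dev *hace_dev,
				 u32 src, u32 dst, u32 len)
{
	u32 cmd = HACE_CMD_AES_SELECT | HACE_CMD_AES128 | HACE_CMD_CBC |
		  HACE_CMD_ENCRYPT | HACE_CMD_ISR_EN;

	ast_hace_write(hace_dev, src, ASPEED_HACE_SRC);
	ast_hace_write(hace_dev, dst, ASPEED_HACE_DEST);
	ast_hace_write(hace_dev, len, ASPEED_HACE_DATA_LEN);
	ast_hace_write(hace_dev, cmd, ASPEED_HACE_CMD);	/* start the engine */
}
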
......@@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
cipher_len = regk_crypto_key_256;
break;
default:
pr_err("%s: Invalid key length %d!\n",
pr_err("%s: Invalid key length %zu!\n",
MODULE_NAME, ctx->key_length);
return -EINVAL;
}
......@@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data)
return;
}
spin_lock_bh(&ac->queue_lock);
spin_lock(&ac->queue_lock);
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
......@@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data)
artpec6_crypto_process_queue(ac, &complete_in_progress);
spin_unlock_bh(&ac->queue_lock);
spin_unlock(&ac->queue_lock);
/* Perform the completion callbacks without holding the queue lock
* to allow new request submissions from the callbacks.
......
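
The artpec6 hunks above swap spin_lock_bh() for plain spin_lock() because the function runs as a tasklet, i.e. in softirq context where bottom halves are already disabled, and the completion callbacks are invoked with the lock dropped so they may enqueue new requests. A minimal sketch of that pattern, with hypothetical names throughout:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical request type, for illustration only. */
struct example_req {
	struct list_head list;
	void (*complete)(struct example_req *req);
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(queue_lock);

/* Runs as a tasklet: softirq context, so plain spin_lock() suffices. */
static void example_done_task(unsigned long data)
{
	struct example_req *req, *n;
	LIST_HEAD(done);

	spin_lock(&queue_lock);
	/* Move finished requests to a private list under the lock... */
	list_for_each_entry_safe(req, n, &pending, list)
		list_move_tail(&req->list, &done);
	spin_unlock(&queue_lock);

	/* ...then run callbacks unlocked, so they may submit new work. */
	list_for_each_entry_safe(req, n, &done, list) {
		list_del(&req->list);
		req->complete(req);
	}
}
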
......@@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
/* SPU2 hardware does not compute hash of zero length data */
if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
(iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
alg_name = crypto_ahash_alg_name(tfm);
flow_log("Doing %sfinal %s zero-len hash request in software\n",
rctx->is_final ? "" : "non-", alg_name);
err = do_shash((unsigned char *)alg_name, req->result,
......@@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
* supported by the hardware, we need to handle it in software
* by calling synchronous hash functions.
*/
alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
alg_name = crypto_ahash_alg_name(tfm);
hash = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(hash)) {
ret = PTR_ERR(hash);
......
......@@ -231,7 +231,7 @@ struct iproc_ctx_s {
/*
* shash descriptor - needed to perform incremental hashing in
* in software, when hw doesn't support it.
* software, when hw doesn't support it.
*/
struct shash_desc *shash;
......
......@@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s {
* Word0
* reserved_20_63:44 [63:20] Reserved.
* dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
* to the CPT instruction doorbell count. Readback value is the the
* to the CPT instruction doorbell count. Readback value is the
* current number of pending doorbell requests. If counter overflows
* CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
* zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
......
......@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
......@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
mcode->code_size = ntohl(ucode->code_length) * 2;
if (!mcode->code_size) {
code_length = ntohl(ucode->code_length);
if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
......
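
In the cavium fix above, code_length comes from untrusted firmware data, so it is bounded before the multiply to keep code_length * 2 from wrapping; the in-tree check uses INT_MAX / 2, presumably because the size later feeds signed arithmetic. check_mul_overflow() from <linux/overflow.h> expresses the same guard generically. The helper below is an illustration, not driver code:

#include <linux/errno.h>
#include <linux/overflow.h>

static int example_code_size(u32 code_length, u32 *code_size)
{
	if (code_length == 0)
		return -EINVAL;
	/* Returns true (and fails) if code_length * 2 would wrap a u32. */
	if (check_mul_overflow(code_length, 2U, code_size))
		return -EINVAL;
	return 0;
}
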
......@@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen,
/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_ctx_init(zip_ctx, 0);
return ret;
return zip_ctx_init(zip_ctx, 0);
}
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_ctx_init(zip_ctx, 1);
return ret;
return zip_ctx_init(zip_ctx, 1);
}
void zip_free_comp_ctx(struct crypto_tfm *tfm)
......@@ -227,24 +221,18 @@ int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_compress(src, slen, dst, dlen, zip_ctx);
return ret;
return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
return ret;
return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* Legacy compress framework end */
/* SCOMP framework start */
......@@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
ret = zip_compress(src, slen, dst, dlen, zip_ctx);
return ret;
return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
return ret;
return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* SCOMP framework end */
......@@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
int ret;
if (!ctx->u.des3.key_len)
return -EINVAL;
......@@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_des3_encrypt(struct skcipher_request *req)
......
......@@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
if (dma_chan->client_count)
dma_release_channel(dma_chan);
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
......@@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
......
......@@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
}
ret = dma_map_sg(dev, sg, *nents, direction);
if (dma_mapping_error(dev, ret)) {
if (!ret) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
return -ENOMEM;
......
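
The ccree fix above hinges on a DMA API convention: dma_map_sg() reports failure by returning 0 (otherwise it returns the number of mapped entries), while dma_mapping_error() is meaningful only for single-address APIs such as dma_map_single(). Testing dma_map_sg()'s return with dma_mapping_error() was therefore wrong. A brief illustration of the two conventions (example_map is a hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_map(struct device *dev, struct scatterlist *sg,
		       int nents, void *buf, size_t len)
{
	dma_addr_t addr;
	int mapped;

	/* dma_map_sg() signals failure by returning 0... */
	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ...while dma_map_single() needs dma_mapping_error(). */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* use the mappings... then unmap (omitted for brevity) */
	return 0;
}
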
......@@ -22,7 +22,8 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
HPRE_CLUSTER3
HPRE_CLUSTER3,
HPRE_CLUSTERS_NUM_MAX
};
enum hpre_ctrl_dbgfs_file {
......@@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
#define HPRE_CLUSTERS_NUM_V3 1
#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
......@@ -105,5 +103,5 @@ struct hpre_sqe {
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg);
#endif
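
The hpre change above moves HPRE_CLUSTERS_NUM_MAX into the enum so the count tracks the entries automatically instead of being maintained by hand in a #define. The sentinel idiom in general form (example names only):

enum example_cluster {
	EXAMPLE_CLUSTER0,
	EXAMPLE_CLUSTER1,
	EXAMPLE_CLUSTER2,
	EXAMPLE_CLUSTER3,
	EXAMPLE_CLUSTERS_NUM_MAX	/* always equals the entry count */
};

/* Arrays sized by the sentinel grow automatically with the enum. */
static int example_stats[EXAMPLE_CLUSTERS_NUM_MAX];
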
......@@ -81,7 +81,8 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
#endif
......@@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
config CRYPTO_DEV_KEEMBAY_OCS_ECC
tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
depends on ARCH_KEEMBAY || COMPILE_TEST
depends on OF || COMPILE_TEST
depends on OF
depends on HAS_IOMEM
select CRYPTO_ECDH
select CRYPTO_ENGINE
......@@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU
select CRYPTO_ENGINE
depends on HAS_IOMEM
depends on ARCH_KEEMBAY || COMPILE_TEST
depends on OF || COMPILE_TEST
depends on OF
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
Control Unit (HCU) hardware acceleration for use with Crypto API.
......
......@@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status {
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
* which if the line hits and is is dirty will cause the line to be
* which if the line hits and is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
......
......@@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void)
*
* So we have to back-translate, going through the 'intr' and 'ino'
* property tables of the n2cp MDESC node, matching it with the OF
* 'interrupts' property entries, in order to to figure out which
* 'interrupts' property entries, in order to figure out which
* devino goes to which already-translated IRQ.
*/
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
......