Commit eeb7a893 authored by Palmer Dabbelt

Merge patch series "riscv: mm: Extend mappable memory up to hint address"

Charlie Jenkins <charlie@rivosinc.com> says:

On riscv, mmap currently returns an address from the largest address
space that can fit entirely below the hint address, so the hint address
itself is almost never returned. This patch raises the mappable area up
to and including the hint address, allowing mmap to return the hint
address in most cases. This avoids the cost of searching for a valid
address and makes the behavior more consistent with other architectures.

Note that a previous patch introduced stronger semantics for riscv mmap
than other architectures provide. On riscv, mmap will not return an
address that uses more upper bits of the virtual address than the hint
address does. On other architectures, an arbitrary address within the
requested address space may be returned; on all architectures, the hint
address is returned if it is available. This lets riscv applications
configure how many upper bits of the virtual address should be left
empty, which has two benefits: applications can request address spaces
smaller than the default, and they need no knowledge of the riscv page
table layout.
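
A minimal userspace sketch of the new semantics (illustrative only, not
part of the series):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/*
	 * Hint just below 1 << 38 so the kernel only returns addresses
	 * that fit in an sv39-sized user address space, even on sv48 or
	 * sv57 hardware. With this series, the hint itself is returned
	 * when it is free.
	 */
	void *hint = (void *)((1UL << 38) - 0x1000);
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	printf("mapped at %p\n", p);
	munmap(p, 4096);
	return 0;
}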

* b4-shazam-merge:
  docs: riscv: Define behavior of mmap
  selftests: riscv: Generalize mm selftests
  riscv: mm: Use hint address in mmap if available

Link: https://lore.kernel.org/r/20240130-use_mmap_hint_address-v3-0-8a655cfa8bcb@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents 2b2ca354 cd6c916c
......@@ -144,14 +144,8 @@ passing 0 into the hint address parameter of mmap. On CPUs with an address space
smaller than sv48, the CPU maximum supported address space will be the default.
Software can "opt-in" to receiving VAs from another VA space by providing
a hint address to mmap. A hint address passed to mmap will cause the largest
address space that fits entirely into the hint to be used, unless there is no
space left in the address space. If there is no space available in the requested
address space, an address in the next smallest available address space will be
returned.
For example, in order to obtain 48-bit VA space, a hint address greater than
:code:`1 << 47` must be provided. Note that this is 47 due to sv48 userspace
ending at :code:`1 << 47` and the addresses beyond this are reserved for the
kernel. Similarly, to obtain 57-bit VA space addresses, a hint address greater
than or equal to :code:`1 << 56` must be provided.
a hint address to mmap. When a hint address is passed to mmap, the returned
address will never use more bits than the hint address. For example, if a hint
address of `1 << 40` is passed to mmap, a valid returned address will never use
bits 41 through 63. If no mappable addresses are available in that range, mmap
will return `MAP_FAILED`.
......@@ -44,6 +44,7 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPUFREQ_DT=y
CONFIG_ACPI_CPPC_CPUFREQ=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_ACPI=y
......
......@@ -3,14 +3,14 @@
menu "Accelerated Cryptographic Algorithms for CPU (riscv)"
config CRYPTO_AES_RISCV64
tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS"
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
select CRYPTO_SKCIPHER
help
Block cipher: AES cipher algorithms
Length-preserving ciphers: AES with ECB, CBC, CTR, XTS
Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTS
Architecture: riscv64 using:
- Zvkned vector crypto extension
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* AES using the RISC-V vector crypto extensions. Includes the bare block
* cipher and the ECB, CBC, CTR, and XTS modes.
* cipher and the ECB, CBC, CBC-CTS, CTR, and XTS modes.
*
* Copyright (C) 2023 VRULL GmbH
* Author: Heiko Stuebner <heiko.stuebner@vrull.eu>
*
* Copyright (C) 2023 SiFive, Inc.
* Author: Jerry Shih <jerry.shih@sifive.com>
*
* Copyright 2024 Google LLC
*/
#include <asm/simd.h>
......@@ -40,6 +42,10 @@ asmlinkage void aes_cbc_decrypt_zvkned(const struct crypto_aes_ctx *key,
const u8 *in, u8 *out, size_t len,
u8 iv[AES_BLOCK_SIZE]);
asmlinkage void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key,
const u8 *in, u8 *out, size_t len,
const u8 iv[AES_BLOCK_SIZE], bool enc);
asmlinkage void aes_ctr32_crypt_zvkned_zvkb(const struct crypto_aes_ctx *key,
const u8 *in, u8 *out, size_t len,
u8 iv[AES_BLOCK_SIZE]);
......@@ -164,7 +170,7 @@ static int riscv64_aes_ecb_decrypt(struct skcipher_request *req)
/* AES-CBC */
static inline int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc)
static int riscv64_aes_cbc_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
......@@ -202,6 +208,70 @@ static int riscv64_aes_cbc_decrypt(struct skcipher_request *req)
return riscv64_aes_cbc_crypt(req, false);
}
/* AES-CBC-CTS */
static int riscv64_aes_cbc_cts_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
struct scatterlist *src, *dst;
struct skcipher_walk walk;
unsigned int cbc_len;
int err;
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
/*
* If the full message is available in one step, decrypt it in one call
* to the CBC-CTS assembly function. This reduces overhead, especially
* on short messages. Otherwise, fall back to doing CBC up to the last
* two blocks, then invoke CTS just for the ciphertext stealing.
*/
if (unlikely(walk.nbytes != req->cryptlen)) {
cbc_len = round_down(req->cryptlen - AES_BLOCK_SIZE - 1,
AES_BLOCK_SIZE);
skcipher_walk_abort(&walk);
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
cbc_len, req->iv);
err = riscv64_aes_cbc_crypt(&subreq, enc);
if (err)
return err;
dst = src = scatterwalk_ffwd(sg_src, req->src, cbc_len);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst, cbc_len);
skcipher_request_set_crypt(&subreq, src, dst,
req->cryptlen - cbc_len, req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
}
kernel_vector_begin();
aes_cbc_cts_crypt_zvkned(ctx, walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, req->iv, enc);
kernel_vector_end();
return skcipher_walk_done(&walk, 0);
}
static int riscv64_aes_cbc_cts_encrypt(struct skcipher_request *req)
{
return riscv64_aes_cbc_cts_crypt(req, true);
}
static int riscv64_aes_cbc_cts_decrypt(struct skcipher_request *req)
{
return riscv64_aes_cbc_cts_crypt(req, false);
}
/* AES-CTR */
static int riscv64_aes_ctr_crypt(struct skcipher_request *req)
......@@ -434,6 +504,22 @@ static struct skcipher_alg riscv64_zvkned_aes_skcipher_algs[] = {
.cra_driver_name = "cbc-aes-riscv64-zvkned",
.cra_module = THIS_MODULE,
},
}, {
.setkey = riscv64_aes_setkey_skcipher,
.encrypt = riscv64_aes_cbc_cts_encrypt,
.decrypt = riscv64_aes_cbc_cts_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.walksize = 4 * AES_BLOCK_SIZE, /* matches LMUL=4 */
.base = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_priority = 300,
.cra_name = "cts(cbc(aes))",
.cra_driver_name = "cts-cbc-aes-riscv64-zvkned",
.cra_module = THIS_MODULE,
},
}
};
......@@ -540,11 +626,12 @@ static void __exit riscv64_aes_mod_exit(void)
module_init(riscv64_aes_mod_init);
module_exit(riscv64_aes_mod_exit);
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS (RISC-V accelerated)");
MODULE_DESCRIPTION("AES-ECB/CBC/CTS/CTR/XTS (RISC-V accelerated)");
MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
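
As a hedged usage sketch (not from this patch), a kernel caller could
reach the new mode through the generic skcipher API. The algorithm name
"cts(cbc(aes))" comes from the registration above; everything else here
is illustrative:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_cts_cbc_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen,
				   u8 iv[16])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cts(cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);	/* CTS requires len >= AES_BLOCK_SIZE */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}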
......@@ -139,19 +139,25 @@ SYM_FUNC_END(aes_ecb_decrypt_zvkned)
.endm
.macro aes_cbc_decrypt keylen
srli LEN, LEN, 2 // Convert LEN from bytes to words
vle32.v v16, (IVP) // Load IV
1:
vle32.v v17, (INP) // Load ciphertext block
vmv.v.v v18, v17 // Save ciphertext block
aes_decrypt v17, \keylen // Decrypt
vxor.vv v17, v17, v16 // XOR with IV or prev ciphertext block
vse32.v v17, (OUTP) // Store plaintext block
vmv.v.v v16, v18 // Next "IV" is prev ciphertext block
addi INP, INP, 16
addi OUTP, OUTP, 16
addi LEN, LEN, -16
vsetvli t0, LEN, e32, m4, ta, ma
vle32.v v20, (INP) // Load ciphertext blocks
vslideup.vi v16, v20, 4 // Setup prev ciphertext blocks
addi t1, t0, -4
vslidedown.vx v24, v20, t1 // Save last ciphertext block
aes_decrypt v20, \keylen // Decrypt the blocks
vxor.vv v20, v20, v16 // XOR with prev ciphertext blocks
vse32.v v20, (OUTP) // Store plaintext blocks
vmv.v.v v16, v24 // Next "IV" is last ciphertext block
slli t1, t0, 2 // Words to bytes
add INP, INP, t1
add OUTP, OUTP, t1
sub LEN, LEN, t0
bnez LEN, 1b
vsetivli zero, 4, e32, m1, ta, ma
vse32.v v16, (IVP) // Store next IV
ret
.endm
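// Added commentary (not upstream): data flow of the vectorized loop above.
// With e32/m4 and VL = t0 words (t0/4 blocks per iteration):
//   vslideup.vi v16, v20, 4   =>  v16 = [ prev | C0 | C1 | ... ]
//   aes_decrypt v20           =>  v20 = [ D(C0) | D(C1) | ... ]
//   vxor.vv v20, v20, v16     =>  each plaintext block Pi = D(Ci) ^ C(i-1),
// where C(-1) is the carried IV (or the last ciphertext block of the
// previous iteration), so a whole group of blocks is CBC-decrypted in
// parallel.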
......@@ -178,3 +184,156 @@ SYM_FUNC_START(aes_cbc_decrypt_zvkned)
192:
aes_cbc_decrypt 192
SYM_FUNC_END(aes_cbc_decrypt_zvkned)
.macro aes_cbc_cts_encrypt keylen
// CBC-encrypt all blocks except the last. But don't store the
// second-to-last block to the output buffer yet, since it will be
// handled specially in the ciphertext stealing step. Exception: if the
// message is single-block, still encrypt the last (and only) block.
li t0, 16
j 2f
1:
vse32.v v16, (OUTP) // Store ciphertext block
addi OUTP, OUTP, 16
2:
vle32.v v17, (INP) // Load plaintext block
vxor.vv v16, v16, v17 // XOR with IV or prev ciphertext block
aes_encrypt v16, \keylen // Encrypt
addi INP, INP, 16
addi LEN, LEN, -16
bgt LEN, t0, 1b // Repeat if more than one block remains
// Special case: if the message is a single block, just do CBC.
beqz LEN, .Lcts_encrypt_done\@
// Encrypt the last two blocks using ciphertext stealing as follows:
// C[n-1] = Encrypt(Encrypt(P[n-1] ^ C[n-2]) ^ P[n])
// C[n] = Encrypt(P[n-1] ^ C[n-2])[0..LEN]
//
// C[i] denotes the i'th ciphertext block, and likewise P[i] the i'th
// plaintext block. Block n, the last block, may be partial; its length
// is 1 <= LEN <= 16. If there are only 2 blocks, C[n-2] means the IV.
//
// v16 already contains Encrypt(P[n-1] ^ C[n-2]).
// INP points to P[n]. OUTP points to where C[n-1] should go.
// To support in-place encryption, load P[n] before storing C[n].
addi t0, OUTP, 16 // Get pointer to where C[n] should go
vsetvli zero, LEN, e8, m1, tu, ma
vle8.v v17, (INP) // Load P[n]
vse8.v v16, (t0) // Store C[n]
vxor.vv v16, v16, v17 // v16 = Encrypt(P[n-1] ^ C[n-2]) ^ P[n]
vsetivli zero, 4, e32, m1, ta, ma
aes_encrypt v16, \keylen
.Lcts_encrypt_done\@:
vse32.v v16, (OUTP) // Store C[n-1] (or C[n] in single-block case)
ret
.endm
#define LEN32 t4 // Length of remaining full blocks in 32-bit words
#define LEN_MOD16 t5 // Length of message in bytes mod 16
.macro aes_cbc_cts_decrypt keylen
andi LEN32, LEN, ~15
srli LEN32, LEN32, 2
andi LEN_MOD16, LEN, 15
// Save C[n-2] in v28 so that it's available later during the ciphertext
// stealing step. If there are fewer than three blocks, C[n-2] means
// the IV, otherwise it means the third-to-last ciphertext block.
vmv.v.v v28, v16 // IV
add t0, LEN, -33
bltz t0, .Lcts_decrypt_loop\@
andi t0, t0, ~15
add t0, t0, INP
vle32.v v28, (t0)
// CBC-decrypt all full blocks. For the last full block, or the last 2
// full blocks if the message is block-aligned, this doesn't write the
// correct output blocks (unless the message is only a single block),
// because it XORs the wrong values with the raw AES plaintexts. But we
// fix this after this loop without redoing the AES decryptions. This
// approach allows more of the AES decryptions to be parallelized.
.Lcts_decrypt_loop\@:
vsetvli t0, LEN32, e32, m4, ta, ma
addi t1, t0, -4
vle32.v v20, (INP) // Load next set of ciphertext blocks
vmv.v.v v24, v16 // Get IV or last ciphertext block of prev set
vslideup.vi v24, v20, 4 // Setup prev ciphertext blocks
vslidedown.vx v16, v20, t1 // Save last ciphertext block of this set
aes_decrypt v20, \keylen // Decrypt this set of blocks
vxor.vv v24, v24, v20 // XOR prev ciphertext blocks with decrypted blocks
vse32.v v24, (OUTP) // Store this set of plaintext blocks
sub LEN32, LEN32, t0
slli t0, t0, 2 // Words to bytes
add INP, INP, t0
add OUTP, OUTP, t0
bnez LEN32, .Lcts_decrypt_loop\@
vsetivli zero, 4, e32, m4, ta, ma
vslidedown.vx v20, v20, t1 // Extract raw plaintext of last full block
addi t0, OUTP, -16 // Get pointer to last full plaintext block
bnez LEN_MOD16, .Lcts_decrypt_non_block_aligned\@
// Special case: if the message is a single block, just do CBC.
li t1, 16
beq LEN, t1, .Lcts_decrypt_done\@
// Block-aligned message. Just fix up the last 2 blocks. We need:
//
// P[n-1] = Decrypt(C[n]) ^ C[n-2]
// P[n] = Decrypt(C[n-1]) ^ C[n]
//
// We have C[n] in v16, Decrypt(C[n]) in v20, and C[n-2] in v28.
// Together with Decrypt(C[n-1]) ^ C[n-2] from the output buffer, this
// is everything needed to fix the output without re-decrypting blocks.
addi t1, OUTP, -32 // Get pointer to where P[n-1] should go
vxor.vv v20, v20, v28 // Decrypt(C[n]) ^ C[n-2] == P[n-1]
vle32.v v24, (t1) // Decrypt(C[n-1]) ^ C[n-2]
vse32.v v20, (t1) // Store P[n-1]
vxor.vv v20, v24, v16 // Decrypt(C[n-1]) ^ C[n-2] ^ C[n] == P[n] ^ C[n-2]
j .Lcts_decrypt_finish\@
.Lcts_decrypt_non_block_aligned\@:
// Decrypt the last two blocks using ciphertext stealing as follows:
//
// P[n-1] = Decrypt(C[n] || Decrypt(C[n-1])[LEN_MOD16..16]) ^ C[n-2]
// P[n] = (Decrypt(C[n-1]) ^ C[n])[0..LEN_MOD16]
//
// We already have Decrypt(C[n-1]) in v20 and C[n-2] in v28.
vmv.v.v v16, v20 // v16 = Decrypt(C[n-1])
vsetvli zero, LEN_MOD16, e8, m1, tu, ma
vle8.v v20, (INP) // v20 = C[n] || Decrypt(C[n-1])[LEN_MOD16..16]
vxor.vv v16, v16, v20 // v16 = Decrypt(C[n-1]) ^ C[n]
vse8.v v16, (OUTP) // Store P[n]
vsetivli zero, 4, e32, m1, ta, ma
aes_decrypt v20, \keylen // v20 = Decrypt(C[n] || Decrypt(C[n-1])[LEN_MOD16..16])
.Lcts_decrypt_finish\@:
vxor.vv v20, v20, v28 // XOR with C[n-2]
vse32.v v20, (t0) // Store last full plaintext block
.Lcts_decrypt_done\@:
ret
.endm
.macro aes_cbc_cts_crypt keylen
vle32.v v16, (IVP) // Load IV
beqz a5, .Lcts_decrypt\@
aes_cbc_cts_encrypt \keylen
.Lcts_decrypt\@:
aes_cbc_cts_decrypt \keylen
.endm
// void aes_cbc_cts_crypt_zvkned(const struct crypto_aes_ctx *key,
// const u8 *in, u8 *out, size_t len,
// const u8 iv[16], bool enc);
//
// Encrypts or decrypts a message with the CS3 variant of AES-CBC-CTS.
// This is the variant that unconditionally swaps the last two blocks.
SYM_FUNC_START(aes_cbc_cts_crypt_zvkned)
aes_begin KEYP, 128f, 192f
aes_cbc_cts_crypt 256
128:
aes_cbc_cts_crypt 128
192:
aes_cbc_cts_crypt 192
SYM_FUNC_END(aes_cbc_cts_crypt_zvkned)
......@@ -17,7 +17,6 @@
#endif
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
......@@ -207,7 +206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
......@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
......@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
" addi %[rc], %[p], 1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
......@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
" addi %[rc], %[p], -1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
......@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
......@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
" addi %[rc], %[p], 1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
......@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
" addi %[rc], %[p], -1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
......@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
" fence rw, rw\n"
RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
......
......@@ -11,28 +11,27 @@
#define _ASM_RISCV_BARRIER_H
#ifndef __ASSEMBLY__
#include <asm/fence.h>
#define nop() __asm__ __volatile__ ("nop")
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) __asm__ __volatile__ (__nops(n))
#define RISCV_FENCE(p, s) \
__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
#define mb() RISCV_FENCE(iorw,iorw)
#define rmb() RISCV_FENCE(ir,ir)
#define wmb() RISCV_FENCE(ow,ow)
#define __mb() RISCV_FENCE(iorw, iorw)
#define __rmb() RISCV_FENCE(ir, ir)
#define __wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb() RISCV_FENCE(rw,rw)
#define __smp_rmb() RISCV_FENCE(r,r)
#define __smp_wmb() RISCV_FENCE(w,w)
#define __smp_mb() RISCV_FENCE(rw, rw)
#define __smp_rmb() RISCV_FENCE(r, r)
#define __smp_wmb() RISCV_FENCE(w, w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
RISCV_FENCE(rw,w); \
RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
......@@ -40,7 +39,7 @@ do { \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
RISCV_FENCE(r,rw); \
RISCV_FENCE(r, rw); \
___p1; \
})
......@@ -69,7 +68,7 @@ do { \
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
#include <asm-generic/barrier.h>
......
......@@ -8,7 +8,6 @@
#include <linux/bug.h>
#include <asm/barrier.h>
#include <asm/fence.h>
#define __xchg_relaxed(ptr, new, size) \
......@@ -313,7 +312,7 @@
" bne %0, %z3, 1f\n" \
" sc.w.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
" fence rw, rw\n" \
RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" ((long)__old), "rJ" (__new) \
......@@ -325,7 +324,7 @@
" bne %0, %z3, 1f\n" \
" sc.d.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
" fence rw, rw\n" \
RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
......
......@@ -14,9 +14,28 @@
static inline int is_compat_task(void)
{
if (!IS_ENABLED(CONFIG_COMPAT))
return 0;
return test_thread_flag(TIF_32BIT);
}
static inline int is_compat_thread(struct thread_info *thread)
{
if (!IS_ENABLED(CONFIG_COMPAT))
return 0;
return test_ti_thread_flag(thread, TIF_32BIT);
}
static inline void set_compat_task(bool is_compat)
{
if (is_compat)
set_thread_flag(TIF_32BIT);
else
clear_thread_flag(TIF_32BIT);
}
struct compat_user_regs_struct {
compat_ulong_t pc;
compat_ulong_t ra;
......
......@@ -53,13 +53,9 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
#ifdef CONFIG_64BIT
#ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
#define STACK_RND_MASK (is_compat_task() ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
#else
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#endif
#endif
/*
......@@ -139,10 +135,7 @@ do { \
#ifdef CONFIG_COMPAT
#define SET_PERSONALITY(ex) \
do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_32BIT); \
else \
clear_thread_flag(TIF_32BIT); \
do { set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
......
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
#define RISCV_FENCE(p, s) \
({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
#ifdef CONFIG_SMP
#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w)
#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
#define RISCV_FULL_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
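/*
 * Added illustration (assumes CONFIG_SMP=y): the helpers above expand to
 * exactly the instructions the old open-coded strings produced, e.g.
 *
 *   RISCV_FENCE(r, rw);   // emits: fence r,rw  (acquire ordering)
 *
 * and, spliced into an LR/SC asm template as done in atomic.h/cmpxchg.h:
 *
 *   " sc.w.rl %1, %z4, %2\n"
 *   " bnez    %1, 0b\n"
 *   RISCV_FULL_BARRIER        // i.e. "\tfence rw,rw\n"
 *   "1:\n"
 */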
......@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
#define __io_pbr() RISCV_FENCE(io, i)
#define __io_par(v) RISCV_FENCE(i, ior)
#define __io_pbw() RISCV_FENCE(iow, o)
#define __io_paw() RISCV_FENCE(o, io)
/*
* Accesses from a single hart to a single I/O address must be ordered. This
......
......@@ -12,6 +12,7 @@
#define _ASM_RISCV_MMIO_H
#include <linux/types.h>
#include <asm/fence.h>
#include <asm/mmiowb.h>
/* Generic IO read/write. These perform native-endian accesses. */
......@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
#define __io_ar(v) RISCV_FENCE(i, ir)
#define __io_bw() RISCV_FENCE(w, o)
#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
......
......@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
#define mmiowb() RISCV_FENCE(o, w)
#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
......
......@@ -127,16 +127,10 @@
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
#ifdef CONFIG_COMPAT
#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
#endif /* CONFIG_COMPAT */
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
......@@ -439,6 +433,12 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte) (pte_napot(pte) ? \
napot_cont_size(napot_cont_order(pte)) :\
PAGE_SIZE)
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/asm-generic/pgtable.h
......@@ -513,12 +513,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
WRITE_ONCE(*ptep, pteval);
}
void flush_icache_pte(pte_t pte);
void flush_icache_pte(struct mm_struct *mm, pte_t pte);
static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
flush_icache_pte(pteval);
flush_icache_pte(mm, pteval);
set_pte(ptep, pteval);
}
......@@ -529,7 +529,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
page_table_check_ptes_set(mm, ptep, pteval, nr);
for (;;) {
__set_pte_at(ptep, pteval);
__set_pte_at(mm, ptep, pteval);
if (--nr == 0)
break;
ptep++;
......@@ -541,7 +541,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
__set_pte_at(ptep, __pte(0));
__set_pte_at(mm, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
......@@ -713,14 +713,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(mm, pmdp, pmd);
return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(mm, pudp, pud);
return __set_pte_at((pte_t *)pudp, pud_pte(pud));
return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_PAGE_TABLE_CHECK
......@@ -871,8 +871,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL))
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE (is_compat_task() ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
......
......@@ -14,22 +14,21 @@
#include <asm/ptrace.h>
#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
#define STACK_TOP_MAX TASK_SIZE
/*
* addr is a hint to the maximum userspace address that mmap should provide, so
* this macro needs to return the largest address space available so that
 * mmap_end < addr, where mmap_end is the top of that address space.
* See Documentation/arch/riscv/vm-layout.rst for more details.
*/
#define arch_get_mmap_end(addr, len, flags) \
({ \
unsigned long mmap_end; \
typeof(addr) _addr = (addr); \
if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
mmap_end = STACK_TOP_MAX; \
else if ((_addr) >= VA_USER_SV57) \
if ((_addr) == 0 || is_compat_task() || \
((_addr + len) > BIT(VA_BITS - 1))) \
mmap_end = STACK_TOP_MAX; \
else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
mmap_end = VA_USER_SV48; \
else \
mmap_end = VA_USER_SV39; \
mmap_end = (_addr + len); \
mmap_end; \
})
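/*
 * Added worked example (assumes VA_BITS == 48, i.e. sv48 hardware):
 *   addr == 0                      -> mmap_end = STACK_TOP_MAX (opt out)
 *   addr == 1UL << 40, len == 4096 -> addr + len <= BIT(47), so
 *                                     mmap_end = addr + len: no returned
 *                                     address uses bits above the hint.
 *   addr == 1UL << 47, len == 4096 -> addr + len > BIT(47), so the full
 *                                     address space is used.
 */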
......@@ -39,17 +38,17 @@
typeof(addr) _addr = (addr); \
typeof(base) _base = (base); \
unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
if ((_addr) == 0 || is_compat_task() || \
((_addr + len) > BIT(VA_BITS - 1))) \
mmap_base = (_base); \
else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
mmap_base = VA_USER_SV57 - rnd_gap; \
else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
mmap_base = VA_USER_SV48 - rnd_gap; \
else \
mmap_base = VA_USER_SV39 - rnd_gap; \
mmap_base = (_addr + len) - rnd_gap; \
mmap_base; \
})
#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
#define STACK_TOP_MAX TASK_SIZE_64
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
......
......@@ -34,9 +34,9 @@ static __must_check inline bool may_use_simd(void)
return false;
/*
* Nesting is acheived in preempt_v by spreading the control for
* Nesting is achieved in preempt_v by spreading the control for
* preemptible and non-preemptible kernel-mode Vector into two fields.
* Always try to match with prempt_v if kernel V-context exists. Then,
* Always try to match with preempt_v if kernel V-context exists. Then,
* fallback to check non preempt_v if nesting happens, or if the config
* is not set.
*/
......
......@@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void);
asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
unsigned long cpu_resume);
asmlinkage int hibernate_core_restore_code(void);
bool riscv_sbi_hsm_is_supported(void);
bool riscv_sbi_suspend_state_is_valid(u32 state);
int riscv_sbi_hart_suspend(u32 state);
#endif
......@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
return true;
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <linux/pgtable.h>
#endif
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
u64 pfn, unsigned int max_page_shift)
{
unsigned long map_size = PAGE_SIZE;
unsigned long size, order;
if (!has_svnapot())
return map_size;
for_each_napot_order_rev(order) {
if (napot_cont_shift(order) > max_page_shift)
continue;
size = napot_cont_size(order);
if (end - addr < size)
continue;
if (!IS_ALIGNED(addr, size))
continue;
if (!IS_ALIGNED(PFN_PHYS(pfn), size))
continue;
map_size = size;
break;
}
return map_size;
}
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
int shift = PAGE_SHIFT;
unsigned long order;
if (!has_svnapot())
return shift;
WARN_ON_ONCE(size >= PMD_SIZE);
for_each_napot_order_rev(order) {
if (napot_cont_size(order) > size)
continue;
if (!IS_ALIGNED(size, napot_cont_size(order)))
continue;
shift = napot_cont_shift(order);
break;
}
return shift;
}
#endif /* CONFIG_RISCV_ISA_SVNAPOT */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */
......@@ -377,14 +377,14 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
return ret;
}
#else
static const struct user_regset_view compat_riscv_user_native_view = {};
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_32BIT))
if (is_compat_thread(&task->thread_info))
return &compat_riscv_user_native_view;
else
#endif
return &riscv_user_native_view;
}
......@@ -128,4 +128,53 @@ static int __init sbi_system_suspend_init(void)
}
arch_initcall(sbi_system_suspend_init);
static int sbi_suspend_finisher(unsigned long suspend_type,
unsigned long resume_addr,
unsigned long opaque)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
suspend_type, resume_addr, opaque, 0, 0, 0);
return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}
int riscv_sbi_hart_suspend(u32 state)
{
if (state & SBI_HSM_SUSP_NON_RET_BIT)
return cpu_suspend(state, sbi_suspend_finisher);
else
return sbi_suspend_finisher(state, 0, 0);
}
bool riscv_sbi_suspend_state_is_valid(u32 state)
{
if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
state < SBI_HSM_SUSPEND_RET_PLATFORM)
return false;
if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
return false;
return true;
}
bool riscv_sbi_hsm_is_supported(void)
{
/*
* The SBI HSM suspend function is only available when:
* 1) SBI version is 0.3 or higher
* 2) SBI HSM extension is available
*/
if (sbi_spec_version < sbi_mk_version(0, 3) ||
!sbi_probe_extension(SBI_EXT_HSM)) {
pr_info("HSM suspend not available\n");
return false;
}
return true;
}
#endif /* CONFIG_RISCV_SBI */
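/*
 * Hedged usage sketch of the helpers exported above (not from this patch;
 * SBI_HSM_SUSPEND_RET_DEFAULT is the spec's default retentive state, 0x0).
 */
static int __maybe_unused example_hart_idle(void)
{
	u32 state = SBI_HSM_SUSPEND_RET_DEFAULT;

	if (!riscv_sbi_hsm_is_supported())
		return -ENODEV;
	if (!riscv_sbi_suspend_state_is_valid(state))
		return -EINVAL;

	/* Retentive state: no NON_RET bit set, so this is a plain ecall. */
	return riscv_sbi_hart_suspend(state);
}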
......@@ -218,8 +218,7 @@ static int check_unaligned_access_speed_all_cpus(void)
{
unsigned int cpu;
unsigned int cpu_count = num_possible_cpus();
struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
GFP_KERNEL);
struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
if (!bufs) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
......
......@@ -82,12 +82,12 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#endif /* CONFIG_SMP */
#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
if (!test_bit(PG_dcache_clean, &folio->flags)) {
flush_icache_all();
flush_icache_mm(mm, false);
set_bit(PG_dcache_clean, &folio->flags);
}
}
......
......@@ -10,7 +10,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
pte_t entry, int dirty)
{
if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(ptep, entry);
__set_pte_at(vma->vm_mm, ptep, entry);
/*
* update_mmu_cache will unconditionally execute, handling both
* the case that the PTE changed and the spurious fault case.
......
......@@ -286,7 +286,7 @@ config ACPI_CPPC_LIB
config ACPI_PROCESSOR
tristate "Processor"
depends on X86 || ARM64 || LOONGARCH
depends on X86 || ARM64 || LOONGARCH || RISCV
select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || LOONGARCH
select THERMAL
......
# SPDX-License-Identifier: GPL-2.0-only
obj-y += rhct.o
obj-y += rhct.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* Implement CPPC FFH helper routines for RISC-V.
*
* Copyright (C) 2024 Ventana Micro Systems Inc.
*/
#include <acpi/cppc_acpi.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#define SBI_EXT_CPPC 0x43505043
/* CPPC interfaces defined in SBI spec */
#define SBI_CPPC_PROBE 0x0
#define SBI_CPPC_READ 0x1
#define SBI_CPPC_READ_HI 0x2
#define SBI_CPPC_WRITE 0x3
/* RISC-V FFH definitions from RISC-V FFH spec */
#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60)
#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0))
#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0))
#define FFH_CPPC_SBI 0x1
#define FFH_CPPC_CSR 0x2
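/*
 * Added illustration: decoding a hypothetical _CPC FFH register address
 * with the masks above (example values, not from the spec).
 */
static void __maybe_unused example_ffh_decode(u64 address)
{
	if (FFH_CPPC_TYPE(address) == FFH_CPPC_SBI)	/* e.g. BIT_ULL(60) | 0x6 */
		pr_info("SBI CPPC register 0x%x\n", (u32)FFH_CPPC_SBI_REG(address));
	else if (FFH_CPPC_TYPE(address) == FFH_CPPC_CSR)
		pr_info("CSR number 0x%x\n", (u32)FFH_CPPC_CSR_NUM(address));
}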
struct sbi_cppc_data {
u64 val;
u32 reg;
struct sbiret ret;
};
static bool cppc_ext_present;
static int __init sbi_cppc_init(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_CPPC) > 0) {
pr_info("SBI CPPC extension detected\n");
cppc_ext_present = true;
} else {
pr_info("SBI CPPC extension NOT detected!!\n");
cppc_ext_present = false;
}
return 0;
}
device_initcall(sbi_cppc_init);
static void sbi_cppc_read(void *read_data)
{
struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ,
data->reg, 0, 0, 0, 0, 0);
}
static void sbi_cppc_write(void *write_data)
{
struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE,
data->reg, data->val, 0, 0, 0, 0);
}
static void cppc_ffh_csr_read(void *read_data)
{
struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
switch (data->reg) {
/* Support only TIME CSR for now */
case CSR_TIME:
data->ret.value = csr_read(CSR_TIME);
data->ret.error = 0;
break;
default:
data->ret.error = -EINVAL;
break;
}
}
static void cppc_ffh_csr_write(void *write_data)
{
struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
data->ret.error = -EINVAL;
}
/*
* Refer to drivers/acpi/cppc_acpi.c for the description of the functions
* below.
*/
bool cpc_ffh_supported(void)
{
return true;
}
int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
struct sbi_cppc_data data;
if (WARN_ON_ONCE(irqs_disabled()))
return -EPERM;
if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
if (!cppc_ext_present)
return -EINVAL;
data.reg = FFH_CPPC_SBI_REG(reg->address);
smp_call_function_single(cpu, sbi_cppc_read, &data, 1);
*val = data.ret.value;
return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
} else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
data.reg = FFH_CPPC_CSR_NUM(reg->address);
smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1);
*val = data.ret.value;
return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
}
return -EINVAL;
}
int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
{
struct sbi_cppc_data data;
if (WARN_ON_ONCE(irqs_disabled()))
return -EPERM;
if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
if (!cppc_ext_present)
return -EINVAL;
data.reg = FFH_CPPC_SBI_REG(reg->address);
data.val = val;
smp_call_function_single(cpu, sbi_cppc_write, &data, 1);
return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
} else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
data.reg = FFH_CPPC_CSR_NUM(reg->address);
data.val = val;
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
}
return -EINVAL;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024, Ventana Micro Systems Inc
* Author: Sunil V L <sunilvl@ventanamicro.com>
*
*/
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/suspend.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/suspend.h>
#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60)
#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32)
#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60)
static int acpi_cpu_init_idle(unsigned int cpu)
{
int i;
struct acpi_lpi_state *lpi;
struct acpi_processor *pr = per_cpu(processors, cpu);
if (unlikely(!pr || !pr->flags.has_lpi))
return -EINVAL;
if (!riscv_sbi_hsm_is_supported())
return -ENODEV;
if (pr->power.count <= 1)
return -ENODEV;
for (i = 1; i < pr->power.count; i++) {
u32 state;
lpi = &pr->power.lpi_states[i];
/*
* Validate Entry Method as per FFH spec.
* bits[63:60] should be 0x1
* bits[59:32] should be 0x0
* bits[31:0] represent a SBI power_state
*/
if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) ||
(lpi->address & RISCV_FFH_LPI_RSVD_MASK)) {
pr_warn("Invalid LPI entry method %#llx\n", lpi->address);
return -EINVAL;
}
state = lpi->address;
if (!riscv_sbi_suspend_state_is_valid(state)) {
pr_warn("Invalid SBI power state %#x\n", state);
return -EINVAL;
}
}
return 0;
}
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return acpi_cpu_init_idle(cpu);
}
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
u32 state = lpi->address;
if (state & SBI_HSM_SUSP_NON_RET_BIT)
return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend,
lpi->index,
state);
else
return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
lpi->index,
state);
}
......@@ -302,4 +302,33 @@ config QORIQ_CPUFREQ
which are capable of changing the CPU's frequency dynamically.
endif
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
depends on ARM || ARM64 || RISCV
select ACPI_CPPC_LIB
help
This adds a CPUFreq driver which uses CPPC methods
as described in the ACPIv5.1 spec. CPPC stands for
Collaborative Processor Performance Controls. It
is based on an abstract continuous scale of CPU
performance values which allows the remote power
processor to flexibly optimize for power and
performance. CPPC relies on power management firmware
support for its operation.
If in doubt, say N.
config ACPI_CPPC_CPUFREQ_FIE
bool "Frequency Invariance support for CPPC cpufreq driver"
depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
depends on ARM || ARM64 || RISCV
default y
help
This extends frequency invariance support in the CPPC cpufreq driver,
by using CPPC delivered and reference performance counters.
If in doubt, say N.
endmenu
......@@ -3,32 +3,6 @@
# ARM CPU Frequency scaling drivers
#
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
select ACPI_CPPC_LIB
help
This adds a CPUFreq driver which uses CPPC methods
as described in the ACPIv5.1 spec. CPPC stands for
Collaborative Processor Performance Controls. It
is based on an abstract continuous scale of CPU
performance values which allows the remote power
processor to flexibly optimize for power and
performance. CPPC relies on power management firmware
support for its operation.
If in doubt, say N.
config ACPI_CPPC_CPUFREQ_FIE
bool "Frequency Invariance support for CPPC cpufreq driver"
depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
default y
help
This extends frequency invariance support in the CPPC cpufreq driver,
by using CPPC delivered and reference performance counters.
If in doubt, say N.
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
......
......@@ -73,26 +73,6 @@ static inline bool sbi_is_domain_state_available(void)
return data->available;
}
static int sbi_suspend_finisher(unsigned long suspend_type,
unsigned long resume_addr,
unsigned long opaque)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
suspend_type, resume_addr, opaque, 0, 0, 0);
return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}
static int sbi_suspend(u32 state)
{
if (state & SBI_HSM_SUSP_NON_RET_BIT)
return cpu_suspend(state, sbi_suspend_finisher);
else
return sbi_suspend_finisher(state, 0, 0);
}
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
......@@ -100,9 +80,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
u32 state = states[idx];
if (state & SBI_HSM_SUSP_NON_RET_BIT)
return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
else
return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
idx, state);
}
......@@ -133,7 +113,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
else
state = states[idx];
ret = sbi_suspend(state) ? -1 : idx;
ret = riscv_sbi_hart_suspend(state) ? -1 : idx;
ct_cpuidle_exit();
......@@ -206,17 +186,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = {
{ },
};
static bool sbi_suspend_state_is_valid(u32 state)
{
if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
state < SBI_HSM_SUSPEND_RET_PLATFORM)
return false;
if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
return false;
return true;
}
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
......@@ -226,7 +195,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
return err;
}
if (!sbi_suspend_state_is_valid(*state)) {
if (!riscv_sbi_suspend_state_is_valid(*state)) {
pr_warn("Invalid SBI suspend state %#x\n", *state);
return -EINVAL;
}
......@@ -607,16 +576,8 @@ static int __init sbi_cpuidle_init(void)
int ret;
struct platform_device *pdev;
/*
* The SBI HSM suspend function is only available when:
* 1) SBI version is 0.3 or higher
* 2) SBI HSM extension is available
*/
if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
!sbi_probe_extension(SBI_EXT_HSM)) {
pr_info("HSM suspend not available\n");
if (!riscv_sbi_hsm_is_supported())
return 0;
}
ret = platform_driver_register(&sbi_cpuidle_driver);
if (ret)
......
......@@ -6,30 +6,9 @@
TEST(infinite_rlimit)
{
// Only works on 64 bit
#if __riscv_xlen == 64
struct addresses mmap_addresses;
EXPECT_EQ(BOTTOM_UP, memory_layout());
do_mmaps(&mmap_addresses);
EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);
EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
#endif
TEST_MMAPS;
}
TEST_HARNESS_MAIN
......@@ -6,30 +6,9 @@
TEST(default_rlimit)
{
// Only works on 64 bit
#if __riscv_xlen == 64
struct addresses mmap_addresses;
EXPECT_EQ(TOP_DOWN, memory_layout());
do_mmaps(&mmap_addresses);
EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);
EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
#endif
TEST_MMAPS;
}
TEST_HARNESS_MAIN
......@@ -4,63 +4,86 @@
#include <sys/mman.h>
#include <sys/resource.h>
#include <stddef.h>
#include <strings.h>
#include "../../kselftest_harness.h"
#define TOP_DOWN 0
#define BOTTOM_UP 1
struct addresses {
int *no_hint;
int *on_37_addr;
int *on_38_addr;
int *on_46_addr;
int *on_47_addr;
int *on_55_addr;
int *on_56_addr;
#if __riscv_xlen == 64
uint64_t random_addresses[] = {
0x19764f0d73b3a9f0, 0x016049584cecef59, 0x3580bdd3562f4acd,
0x1164219f20b17da0, 0x07d97fcb40ff2373, 0x76ec528921272ee7,
0x4dd48c38a3de3f70, 0x2e11415055f6997d, 0x14b43334ac476c02,
0x375a60795aff19f6, 0x47f3051725b8ee1a, 0x4e697cf240494a9f,
0x456b59b5c2f9e9d1, 0x101724379d63cb96, 0x7fe9ad31619528c1,
0x2f417247c495c2ea, 0x329a5a5b82943a5e, 0x06d7a9d6adcd3827,
0x327b0b9ee37f62d5, 0x17c7b1851dfd9b76, 0x006ebb6456ec2cd9,
0x00836cd14146a134, 0x00e5c4dcde7126db, 0x004c29feadf75753,
0x00d8b20149ed930c, 0x00d71574c269387a, 0x0006ebe4a82acb7a,
0x0016135df51f471b, 0x00758bdb55455160, 0x00d0bdd949b13b32,
0x00ecea01e7c5f54b, 0x00e37b071b9948b1, 0x0011fdd00ff57ab3,
0x00e407294b52f5ea, 0x00567748c200ed20, 0x000d073084651046,
0x00ac896f4365463c, 0x00eb0d49a0b26216, 0x0066a2564a982a31,
0x002e0d20237784ae, 0x0000554ff8a77a76, 0x00006ce07a54c012,
0x000009570516d799, 0x00000954ca15b84d, 0x0000684f0d453379,
0x00002ae5816302b5, 0x0000042403fb54bf, 0x00004bad7392bf30,
0x00003e73bfa4b5e3, 0x00005442c29978e0, 0x00002803f11286b6,
0x000073875d745fc6, 0x00007cede9cb8240, 0x000027df84cc6a4f,
0x00006d7e0e74242a, 0x00004afd0b836e02, 0x000047d0e837cd82,
0x00003b42405efeda, 0x00001531bafa4c95, 0x00007172cae34ac4,
};
#else
uint32_t random_addresses[] = {
0x8dc302e0, 0x929ab1e0, 0xb47683ba, 0xea519c73, 0xa19f1c90, 0xc49ba213,
0x8f57c625, 0xadfe5137, 0x874d4d95, 0xaa20f09d, 0xcf21ebfc, 0xda7737f1,
0xcedf392a, 0x83026c14, 0xccedca52, 0xc6ccf826, 0xe0cd9415, 0x997472ca,
0xa21a44c1, 0xe82196f5, 0xa23fd66b, 0xc28d5590, 0xd009cdce, 0xcf0be646,
0x8fc8c7ff, 0xe2a85984, 0xa3d3236b, 0x89a0619d, 0xc03db924, 0xb5d4cc1b,
0xb96ee04c, 0xd191da48, 0xb432a000, 0xaa2bebbc, 0xa2fcb289, 0xb0cca89b,
0xb0c18d6a, 0x88f58deb, 0xa4d42d1c, 0xe4d74e86, 0x99902b09, 0x8f786d31,
0xbec5e381, 0x9a727e65, 0xa9a65040, 0xa880d789, 0x8f1b335e, 0xfc821c1e,
0x97e34be4, 0xbbef84ed, 0xf447d197, 0xfd7ceee2, 0xe632348d, 0xee4590f4,
0x958992a5, 0xd57e05d6, 0xfd240970, 0xc5b0dcff, 0xd96da2c2, 0xa7ae041d,
};
#endif
// Only works on 64 bit
#if __riscv_xlen == 64
static inline void do_mmaps(struct addresses *mmap_addresses)
{
/*
* Place all of the hint addresses on the boundaries of mmap
* sv39, sv48, sv57
* User addresses end at 1<<38, 1<<47, 1<<56 respectively
*/
void *on_37_bits = (void *)(1UL << 37);
void *on_38_bits = (void *)(1UL << 38);
void *on_46_bits = (void *)(1UL << 46);
void *on_47_bits = (void *)(1UL << 47);
void *on_55_bits = (void *)(1UL << 55);
void *on_56_bits = (void *)(1UL << 56);
#define PROT (PROT_READ | PROT_WRITE)
#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
/* mmap must return a value that doesn't use more bits than the hint address. */
static inline unsigned long get_max_value(unsigned long input)
{
unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 -
__builtin_clzl(input))));
mmap_addresses->no_hint =
mmap(NULL, 5 * sizeof(int), prot, flags, 0, 0);
mmap_addresses->on_37_addr =
mmap(on_37_bits, 5 * sizeof(int), prot, flags, 0, 0);
mmap_addresses->on_38_addr =
mmap(on_38_bits, 5 * sizeof(int), prot, flags, 0, 0);
mmap_addresses->on_46_addr =
mmap(on_46_bits, 5 * sizeof(int), prot, flags, 0, 0);
mmap_addresses->on_47_addr =
mmap(on_47_bits, 5 * sizeof(int), prot, flags, 0, 0);
mmap_addresses->on_55_addr =
mmap(on_55_bits, 5 * sizeof(int), prot, flags, 0, 0);
mmap_addresses->on_56_addr =
mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
return max_bit + (max_bit - 1);
}
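/*
 * Added worked example: for input 0x19764f0d73b3a9f0 the highest set bit
 * is bit 60, so max_bit = 1UL << 60 and the function returns
 * 0x1fffffffffffffff: mmap must not return anything above that mask.
 */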
#define TEST_MMAPS \
({ \
void *mmap_addr; \
for (int i = 0; i < ARRAY_SIZE(random_addresses); i++) { \
mmap_addr = mmap((void *)random_addresses[i], \
5 * sizeof(int), PROT, FLAGS, 0, 0); \
EXPECT_NE(MAP_FAILED, mmap_addr); \
EXPECT_GE((void *)get_max_value(random_addresses[i]), \
mmap_addr); \
mmap_addr = mmap((void *)random_addresses[i], \
5 * sizeof(int), PROT, FLAGS, 0, 0); \
EXPECT_NE(MAP_FAILED, mmap_addr); \
EXPECT_GE((void *)get_max_value(random_addresses[i]), \
mmap_addr); \
} \
})
#endif /* __riscv_xlen == 64 */
static inline int memory_layout(void)
{
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
void *value1 = mmap(NULL, sizeof(int), prot, flags, 0, 0);
void *value2 = mmap(NULL, sizeof(int), prot, flags, 0, 0);
void *value1 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0);
void *value2 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0);
return value2 > value1;
}
......