Commit 890daede authored by Linus Torvalds

Merge tag 'riscv-for-linus-6.11-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - A revert for the mmap() change that ties the allocation range to the
   hint address, as what we tried to do ended up regressing on other
   userspace workloads.

 - A fix to avoid a kernel memory leak when emulating misaligned
   accesses from userspace.

 - A Kconfig fix for toolchain vector detection, which now correctly
   detects vector support on toolchains where the V extension depends on
   the M extension.

 - A fix to avoid failing the linear mapping bootmem bounds check on
   NOMMU systems.

 - A fix for early alternatives on relocatable kernels.

* tag 'riscv-for-linus-6.11-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Fix RISCV_ALTERNATIVE_EARLY
  riscv: Do not restrict memory size because of linear mapping on nommu
  riscv: Fix toolchain vector detection
  riscv: misaligned: Restrict user access to kernel memory
  riscv: mm: Do not restrict mmap address based on hint
  riscv: selftests: Remove mmap hint address checks
  Revert "RISC-V: mm: Document mmap changes"
parents a78d7dce 1ff95eb2
--- a/Documentation/arch/riscv/vm-layout.rst
+++ b/Documentation/arch/riscv/vm-layout.rst
@@ -134,19 +134,3 @@ RISC-V Linux Kernel SV57
  ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
  ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
  __________________|____________|__________________|_________|____________________________________________________________
-
-Userspace VAs
---------------------
-To maintain compatibility with software that relies on the VA space with a
-maximum of 48 bits the kernel will, by default, return virtual addresses to
-userspace from a 48-bit range (sv48). This default behavior is achieved by
-passing 0 into the hint address parameter of mmap. On CPUs with an address space
-smaller than sv48, the CPU maximum supported address space will be the default.
-
-Software can "opt-in" to receiving VAs from another VA space by providing
-a hint address to mmap. When a hint address is passed to mmap, the returned
-address will never use more bits than the hint address. For example, if a hint
-address of `1 << 40` is passed to mmap, a valid returned address will never use
-bits 41 through 63. If no mappable addresses are available in that range, mmap
-will return `MAP_FAILED`.
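The paragraphs removed above documented the hint opt-in contract that this
merge reverts. A minimal userspace sketch of what that contract promised; the
hint value and flags are illustrative only, and after this revert the kernel
no longer restricts the returned address to the hint's bit width:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Under the reverted scheme, a hint of 1 << 40 meant the returned
	 * address would never use bits 41 through 63. */
	void *hint = (void *)(1UL << 40);
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mmap returned %p\n", p);
	munmap(p, 4096);
	return 0;
}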
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -552,8 +552,8 @@ config RISCV_ISA_SVPBMT
 config TOOLCHAIN_HAS_V
 	bool
 	default y
-	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
-	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
 	depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
 	depends on AS_HAS_OPTION_ARCH
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,36 +14,14 @@
 #include <asm/ptrace.h>
 
-/*
- * addr is a hint to the maximum userspace address that mmap should provide, so
- * this macro needs to return the largest address space available so that
- * mmap_end < addr, being mmap_end the top of that address space.
- * See Documentation/arch/riscv/vm-layout.rst for more details.
- */
 #define arch_get_mmap_end(addr, len, flags)			\
 ({								\
-	unsigned long mmap_end;					\
-	typeof(addr) _addr = (addr);				\
-	if ((_addr) == 0 || is_compat_task() ||			\
-	    ((_addr + len) > BIT(VA_BITS - 1)))			\
-		mmap_end = STACK_TOP_MAX;			\
-	else							\
-		mmap_end = (_addr + len);			\
-	mmap_end;						\
+	STACK_TOP_MAX;						\
 })
 
 #define arch_get_mmap_base(addr, base)				\
 ({								\
-	unsigned long mmap_base;				\
-	typeof(addr) _addr = (addr);				\
-	typeof(base) _base = (base);				\
-	unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base);	\
-	if ((_addr) == 0 || is_compat_task() ||			\
-	    ((_addr + len) > BIT(VA_BITS - 1)))			\
-		mmap_base = (_base);				\
-	else							\
-		mmap_base = (_addr + len) - rnd_gap;		\
-	mmap_base;						\
+	base;							\
 })
 
 #ifdef CONFIG_64BIT
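With the revert, both macros ignore the hint again and the search always runs
over the full address space. A small host-side model of the old clamping
versus the new behavior, with VA_BITS and STACK_TOP_MAX given assumed values
and the compat-task case omitted for brevity:

#include <stdio.h>

#define BIT(n)        (1UL << (n))
#define VA_BITS       48                 /* assumed for illustration */
#define STACK_TOP_MAX BIT(VA_BITS - 1)   /* assumed for illustration */

/* Reverted behavior: a usable hint clamped mmap's search space. */
static unsigned long old_mmap_end(unsigned long addr, unsigned long len)
{
	if (addr == 0 || (addr + len) > BIT(VA_BITS - 1))
		return STACK_TOP_MAX;
	return addr + len;
}

int main(void)
{
	unsigned long hint = 1UL << 40;

	printf("old mmap_end: %#lx\n", old_mmap_end(hint, 4096));     /* 0x10000001000 */
	printf("new mmap_end: %#lx\n", (unsigned long)STACK_TOP_MAX); /* 0x800000000000 */
	return 0;
}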
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -9,6 +9,7 @@
 #include <linux/types.h>
 #include <linux/cpumask.h>
+#include <linux/jump_label.h>
 
 #ifdef CONFIG_RISCV_SBI
 enum sbi_ext_id {
@@ -304,6 +305,7 @@ struct sbiret {
 };
 
 void sbi_init(void);
+long __sbi_base_ecall(int fid);
 struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
 			  unsigned long arg2, unsigned long arg3,
 			  unsigned long arg4, unsigned long arg5,
@@ -373,7 +375,23 @@ static inline unsigned long sbi_mk_version(unsigned long major,
 		| (minor & SBI_SPEC_VERSION_MINOR_MASK);
 }
 
-int sbi_err_map_linux_errno(int err);
+static inline int sbi_err_map_linux_errno(int err)
+{
+	switch (err) {
+	case SBI_SUCCESS:
+		return 0;
+	case SBI_ERR_DENIED:
+		return -EPERM;
+	case SBI_ERR_INVALID_PARAM:
+		return -EINVAL;
+	case SBI_ERR_INVALID_ADDRESS:
+		return -EFAULT;
+	case SBI_ERR_NOT_SUPPORTED:
+	case SBI_ERR_FAILURE:
+	default:
+		return -ENOTSUPP;
+	};
+}
 
 extern bool sbi_debug_console_available;
 int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
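Making sbi_err_map_linux_errno() a static inline lets early-boot objects such
as the new sbi_ecall.o use it without calling into sbi.o, which is not built
for early execution. A hedged sketch of a caller in kernel context;
example_get_spec_version() is a made-up name, while sbi_ecall() and the
SBI_EXT_BASE constants are the existing identifiers visible in this diff:

static int example_get_spec_version(long *version)
{
	struct sbiret ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_GET_SPEC_VERSION,
				      0, 0, 0, 0, 0, 0);

	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	*version = ret.value; /* SBI spec version on success */
	return 0;
}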
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -20,17 +20,21 @@ endif
 ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
 CFLAGS_alternative.o := -mcmodel=medany
 CFLAGS_cpufeature.o := -mcmodel=medany
+CFLAGS_sbi_ecall.o := -mcmodel=medany
 ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sbi_ecall.o = $(CC_FLAGS_FTRACE)
 endif
 ifdef CONFIG_RELOCATABLE
 CFLAGS_alternative.o += -fno-pie
 CFLAGS_cpufeature.o += -fno-pie
+CFLAGS_sbi_ecall.o += -fno-pie
 endif
 ifdef CONFIG_KASAN
 KASAN_SANITIZE_alternative.o := n
 KASAN_SANITIZE_cpufeature.o := n
+KASAN_SANITIZE_sbi_ecall.o := n
 endif
 endif
@@ -88,7 +92,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
-obj-$(CONFIG_RISCV_SBI)		+= sbi.o
+obj-$(CONFIG_RISCV_SBI)		+= sbi.o sbi_ecall.o
 
 ifeq ($(CONFIG_RISCV_SBI), y)
 obj-$(CONFIG_SMP)		+= sbi-ipi.o
 obj-$(CONFIG_SMP)		+= cpu_ops_sbi.o
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -14,9 +14,6 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
 
-#define CREATE_TRACE_POINTS
-#include <asm/trace.h>
-
 /* default SBI version is 0.1 */
 unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
 EXPORT_SYMBOL(sbi_spec_version);
@@ -27,55 +24,6 @@ static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
 			   unsigned long start, unsigned long size,
 			   unsigned long arg4, unsigned long arg5) __ro_after_init;
 
-struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
-			  unsigned long arg2, unsigned long arg3,
-			  unsigned long arg4, unsigned long arg5,
-			  int fid, int ext)
-{
-	struct sbiret ret;
-
-	trace_sbi_call(ext, fid);
-
-	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
-	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
-	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
-	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
-	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
-	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
-	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
-	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
-	asm volatile ("ecall"
-		      : "+r" (a0), "+r" (a1)
-		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
-		      : "memory");
-	ret.error = a0;
-	ret.value = a1;
-
-	trace_sbi_return(ext, ret.error, ret.value);
-
-	return ret;
-}
-EXPORT_SYMBOL(__sbi_ecall);
-
-int sbi_err_map_linux_errno(int err)
-{
-	switch (err) {
-	case SBI_SUCCESS:
-		return 0;
-	case SBI_ERR_DENIED:
-		return -EPERM;
-	case SBI_ERR_INVALID_PARAM:
-		return -EINVAL;
-	case SBI_ERR_INVALID_ADDRESS:
-		return -EFAULT;
-	case SBI_ERR_NOT_SUPPORTED:
-	case SBI_ERR_FAILURE:
-	default:
-		return -ENOTSUPP;
-	};
-}
-EXPORT_SYMBOL(sbi_err_map_linux_errno);
-
 #ifdef CONFIG_RISCV_SBI_V01
 static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
 {
@@ -535,17 +483,6 @@ long sbi_probe_extension(int extid)
 }
 EXPORT_SYMBOL(sbi_probe_extension);
 
-static long __sbi_base_ecall(int fid)
-{
-	struct sbiret ret;
-
-	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
-	if (!ret.error)
-		return ret.value;
-	else
-		return sbi_err_map_linux_errno(ret.error);
-}
-
 static inline long sbi_get_spec_version(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
--- /dev/null
+++ b/arch/riscv/kernel/sbi_ecall.c
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Rivos Inc. */
+
+#include <asm/sbi.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace.h>
+
+long __sbi_base_ecall(int fid)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
+	if (!ret.error)
+		return ret.value;
+	else
+		return sbi_err_map_linux_errno(ret.error);
+}
+EXPORT_SYMBOL(__sbi_base_ecall);
+
+struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
+			  unsigned long arg2, unsigned long arg3,
+			  unsigned long arg4, unsigned long arg5,
+			  int fid, int ext)
+{
+	struct sbiret ret;
+
+	trace_sbi_call(ext, fid);
+
+	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
+	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
+	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
+	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
+	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
+	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
+	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
+	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
+	asm volatile ("ecall"
+		      : "+r" (a0), "+r" (a1)
+		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
+		      : "memory");
+	ret.error = a0;
+	ret.value = a1;
+
+	trace_sbi_return(ext, ret.error, ret.value);
+
+	return ret;
+}
+EXPORT_SYMBOL(__sbi_ecall);
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -417,7 +417,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	val.data_u64 = 0;
 
 	if (user_mode(regs)) {
-		if (raw_copy_from_user(&val, (u8 __user *)addr, len))
+		if (copy_from_user(&val, (u8 __user *)addr, len))
 			return -1;
 	} else {
 		memcpy(&val, (u8 *)addr, len);
@@ -515,7 +515,7 @@ int handle_misaligned_store(struct pt_regs *regs)
 		return -EOPNOTSUPP;
 
 	if (user_mode(regs)) {
-		if (raw_copy_to_user((u8 __user *)addr, &val, len))
+		if (copy_to_user((u8 __user *)addr, &val, len))
 			return -1;
 	} else {
 		memcpy((u8 *)addr, &val, len);
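The security-relevant difference: copy_from_user() and copy_to_user()
validate the user pointer with access_ok() before copying, while the raw_
variants do not. Because addr here comes straight from the faulting
instruction, the unchecked raw_ calls let a userspace program point the
"user" side of the emulated access at kernel memory. A simplified sketch of
the checked wrapper (checked_copy_from_user is a made-up name; the real
copy_from_user() also handles might_fault, object-size checks, and zeroing
the uncopied tail):

static inline unsigned long
checked_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* Reject pointers outside the user address range, e.g. kernel
	 * addresses smuggled in through a misaligned access. */
	if (!access_ok(from, n))
		return n;	/* report everything as uncopied */

	return raw_copy_from_user(to, from, n);
}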
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -252,7 +252,7 @@ static void __init setup_bootmem(void)
 	 * The size of the linear page mapping may restrict the amount of
 	 * usable RAM.
 	 */
-	if (IS_ENABLED(CONFIG_64BIT)) {
+	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
 		max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
 		memblock_cap_memory_range(phys_ram_base,
 					  max_mapped_addr - phys_ram_base);
--- a/tools/testing/selftests/riscv/mm/mmap_bottomup.c
+++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
@@ -7,8 +7,6 @@
 TEST(infinite_rlimit)
 {
 	EXPECT_EQ(BOTTOM_UP, memory_layout());
-
-	TEST_MMAPS;
 }
 
 TEST_HARNESS_MAIN
--- a/tools/testing/selftests/riscv/mm/mmap_default.c
+++ b/tools/testing/selftests/riscv/mm/mmap_default.c
@@ -7,8 +7,6 @@
 TEST(default_rlimit)
 {
	EXPECT_EQ(TOP_DOWN, memory_layout());
-
-	TEST_MMAPS;
 }
 
 TEST_HARNESS_MAIN
--- a/tools/testing/selftests/riscv/mm/mmap_test.h
+++ b/tools/testing/selftests/riscv/mm/mmap_test.h
@@ -10,76 +10,9 @@
 #define TOP_DOWN 0
 #define BOTTOM_UP 1
 
-#if __riscv_xlen == 64
-uint64_t random_addresses[] = {
-	0x19764f0d73b3a9f0, 0x016049584cecef59, 0x3580bdd3562f4acd,
-	0x1164219f20b17da0, 0x07d97fcb40ff2373, 0x76ec528921272ee7,
-	0x4dd48c38a3de3f70, 0x2e11415055f6997d, 0x14b43334ac476c02,
-	0x375a60795aff19f6, 0x47f3051725b8ee1a, 0x4e697cf240494a9f,
-	0x456b59b5c2f9e9d1, 0x101724379d63cb96, 0x7fe9ad31619528c1,
-	0x2f417247c495c2ea, 0x329a5a5b82943a5e, 0x06d7a9d6adcd3827,
-	0x327b0b9ee37f62d5, 0x17c7b1851dfd9b76, 0x006ebb6456ec2cd9,
-	0x00836cd14146a134, 0x00e5c4dcde7126db, 0x004c29feadf75753,
-	0x00d8b20149ed930c, 0x00d71574c269387a, 0x0006ebe4a82acb7a,
-	0x0016135df51f471b, 0x00758bdb55455160, 0x00d0bdd949b13b32,
-	0x00ecea01e7c5f54b, 0x00e37b071b9948b1, 0x0011fdd00ff57ab3,
-	0x00e407294b52f5ea, 0x00567748c200ed20, 0x000d073084651046,
-	0x00ac896f4365463c, 0x00eb0d49a0b26216, 0x0066a2564a982a31,
-	0x002e0d20237784ae, 0x0000554ff8a77a76, 0x00006ce07a54c012,
-	0x000009570516d799, 0x00000954ca15b84d, 0x0000684f0d453379,
-	0x00002ae5816302b5, 0x0000042403fb54bf, 0x00004bad7392bf30,
-	0x00003e73bfa4b5e3, 0x00005442c29978e0, 0x00002803f11286b6,
-	0x000073875d745fc6, 0x00007cede9cb8240, 0x000027df84cc6a4f,
-	0x00006d7e0e74242a, 0x00004afd0b836e02, 0x000047d0e837cd82,
-	0x00003b42405efeda, 0x00001531bafa4c95, 0x00007172cae34ac4,
-};
-#else
-uint32_t random_addresses[] = {
-	0x8dc302e0, 0x929ab1e0, 0xb47683ba, 0xea519c73, 0xa19f1c90, 0xc49ba213,
-	0x8f57c625, 0xadfe5137, 0x874d4d95, 0xaa20f09d, 0xcf21ebfc, 0xda7737f1,
-	0xcedf392a, 0x83026c14, 0xccedca52, 0xc6ccf826, 0xe0cd9415, 0x997472ca,
-	0xa21a44c1, 0xe82196f5, 0xa23fd66b, 0xc28d5590, 0xd009cdce, 0xcf0be646,
-	0x8fc8c7ff, 0xe2a85984, 0xa3d3236b, 0x89a0619d, 0xc03db924, 0xb5d4cc1b,
-	0xb96ee04c, 0xd191da48, 0xb432a000, 0xaa2bebbc, 0xa2fcb289, 0xb0cca89b,
-	0xb0c18d6a, 0x88f58deb, 0xa4d42d1c, 0xe4d74e86, 0x99902b09, 0x8f786d31,
-	0xbec5e381, 0x9a727e65, 0xa9a65040, 0xa880d789, 0x8f1b335e, 0xfc821c1e,
-	0x97e34be4, 0xbbef84ed, 0xf447d197, 0xfd7ceee2, 0xe632348d, 0xee4590f4,
-	0x958992a5, 0xd57e05d6, 0xfd240970, 0xc5b0dcff, 0xd96da2c2, 0xa7ae041d,
-};
-#endif
-
-// Only works on 64 bit
-#if __riscv_xlen == 64
 #define PROT (PROT_READ | PROT_WRITE)
 #define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
-
-/* mmap must return a value that doesn't use more bits than the hint address. */
-static inline unsigned long get_max_value(unsigned long input)
-{
-	unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 -
-					  __builtin_clzl(input))));
-
-	return max_bit + (max_bit - 1);
-}
-
-#define TEST_MMAPS							\
-({									\
-	void *mmap_addr;						\
-									\
-	for (int i = 0; i < ARRAY_SIZE(random_addresses); i++) {	\
-		mmap_addr = mmap((void *)random_addresses[i],		\
-				 5 * sizeof(int), PROT, FLAGS, 0, 0);	\
-		EXPECT_NE(MAP_FAILED, mmap_addr);			\
-		EXPECT_GE((void *)get_max_value(random_addresses[i]),	\
-			  mmap_addr);					\
-		mmap_addr = mmap((void *)random_addresses[i],		\
-				 5 * sizeof(int), PROT, FLAGS, 0, 0);	\
-		EXPECT_NE(MAP_FAILED, mmap_addr);			\
-		EXPECT_GE((void *)get_max_value(random_addresses[i]),	\
-			  mmap_addr);					\
-	}								\
-})
-#endif /* __riscv_xlen == 64 */
 
 static inline int memory_layout(void)
 {
 	void *value1 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0);
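For reference, the removed TEST_MMAPS loop asserted that every returned
address stayed at or below a saturated mask derived from the hint's highest
set bit. A small standalone check of that arithmetic, buildable on any 64-bit
host:

#include <assert.h>

/* The helper removed above, reproduced for the worked example. */
static inline unsigned long get_max_value(unsigned long input)
{
	unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 -
					  __builtin_clzl(input))));

	return max_bit + (max_bit - 1);
}

int main(void)
{
	/* The highest set bit of (1UL << 40) is bit 40, so the ceiling is
	 * (1UL << 41) - 1: a conforming mmap() return could not use bits
	 * 41 through 63. */
	assert(get_max_value(1UL << 40) == (1UL << 41) - 1);
	return 0;
}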