Commit 97d8894b authored by Linus Torvalds

Merge tag 'riscv-for-linus-6.12-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

 - Support using Zkr to seed KASLR

 - Support IPI-triggered CPU backtracing

 - Support for generic CPU vulnerabilities reporting to userspace

 - A few cleanups for missing licenses

 - The size limit on the XIP kernel has been removed

 - Support for tracing userspace stacks

 - Support for the Svvptc extension

 - Various cleanups and fixes throughout the tree

* tag 'riscv-for-linus-6.12-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (47 commits)
  crash: Fix riscv64 crash memory reserve dead loop
  perf/riscv-sbi: Add platform specific firmware event handling
  tools: Optimize ring buffer for riscv
  tools: Add riscv barrier implementation
  RISC-V: Don't have MAX_PHYSMEM_BITS exceed phys_addr_t
  ACPI: NUMA: initialize all values of acpi_early_node_map to NUMA_NO_NODE
  riscv: Enable bitops instrumentation
  riscv: Omit optimized string routines when using KASAN
  ACPI: RISCV: Make acpi_numa_get_nid() to be static
  riscv: Randomize lower bits of stack address
  selftests: riscv: Allow mmap test to compile on 32-bit
  riscv: Make riscv_isa_vendor_ext_andes array static
  riscv: Use LIST_HEAD() to simplify code
  riscv: defconfig: Disable RZ/Five peripheral support
  RISC-V: Implement kgdb_roundup_cpus() to enable future NMI Roundup
  riscv: avoid Imbalance in RAS
  riscv: cacheinfo: Add back init_cache_level() function
  riscv: Remove unused _TIF_WORK_MASK
  drivers/perf: riscv: Remove redundant macro check
  riscv: define ILLEGAL_POINTER_VALUE for 64bit
  ...
parents 7108fff8 b3f835cd
@@ -171,6 +171,13 @@ properties:
memory types as ratified in the 20191213 version of the privileged
ISA specification.
- const: svvptc
description:
The standard Svvptc supervisor-level extension for
address-translation cache behaviour with respect to invalid entries
as ratified at commit 4a69197e5617 ("Update to ratified state") of
riscv-svvptc.
- const: zacas
description: |
The Zacas extension for Atomic Compare-and-Swap (CAS) instructions
......
@@ -70,6 +70,7 @@ config RISCV
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_SYM_ANNOTATIONS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -94,6 +95,7 @@ config RISCV
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_DEVICES
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
@@ -204,6 +206,7 @@ config RISCV
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
config CLANG_SUPPORTS_DYNAMIC_FTRACE
@@ -323,6 +326,11 @@ config GENERIC_HWEIGHT
config FIX_EARLYCON_MEM
def_bool MMU
config ILLEGAL_POINTER_VALUE
hex
default 0 if 32BIT
default 0xdead000000000000 if 64BIT
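This mirrors the poison-pointer setup used by other 64-bit architectures: the value feeds POISON_POINTER_DELTA so poisoned list pointers land in an address range that can never be mapped. Roughly, paraphrasing include/linux/poison.h rather than anything in this patch:

#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)

With 0xdead000000000000 folded in, a stray dereference of a poisoned list pointer faults immediately instead of silently hitting low user memory.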
config PGTABLE_LEVELS
int
default 5 if 64BIT
......
@@ -137,12 +137,10 @@ CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_MOTORCOMM_PHY=y
CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
@@ -150,7 +148,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
@@ -160,11 +157,9 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_MV64XXX=m
CONFIG_I2C_RIIC=y
CONFIG_SPI=y
CONFIG_SPI_CADENCE_QUADSPI=m
CONFIG_SPI_PL022=m
CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
@@ -177,7 +172,6 @@ CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_SENSORS_SFCTEMP=m
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_MFD_AXP20X_I2C=y
@@ -206,11 +200,11 @@ CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
# CONFIG_USB_XHCI_RCAR is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_USB_CDNS_SUPPORT=m
@@ -222,7 +216,6 @@ CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_CONFIGFS=m
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_ACM=y
@@ -240,7 +233,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_DWCMSHC=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
CONFIG_MMC_SUNXI=y
@@ -258,7 +250,6 @@ CONFIG_CLK_SOPHGO_SG2042_PLL=y
CONFIG_CLK_SOPHGO_SG2042_CLKGEN=y
CONFIG_CLK_SOPHGO_SG2042_RPGATE=y
CONFIG_SUN8I_DE2_CCU=m
CONFIG_RENESAS_OSTM=y
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
@@ -266,7 +257,6 @@ CONFIG_RPMSG_VIRTIO=y
CONFIG_PM_DEVFREQ=y
CONFIG_IIO=y
CONFIG_PHY_SUN4I_USB=m
CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
CONFIG_PHY_STARFIVE_JH7110_PCIE=m
CONFIG_PHY_STARFIVE_JH7110_USB=m
......
@@ -21,7 +21,7 @@
1:
.endm
ENTRY(sifive_cip_453_page_fault_trp)
SYM_FUNC_START(sifive_cip_453_page_fault_trp)
ADD_SIGN_EXT a0, t0, t1
#ifdef CONFIG_MMU
la t0, do_page_fault
@@ -29,10 +29,10 @@ ENTRY(sifive_cip_453_page_fault_trp)
la t0, do_trap_unknown
#endif
jr t0
END(sifive_cip_453_page_fault_trp)
ENTRY(sifive_cip_453_insn_fault_trp)
SYM_FUNC_END(sifive_cip_453_page_fault_trp)
SYM_FUNC_START(sifive_cip_453_insn_fault_trp)
ADD_SIGN_EXT a0, t0, t1
la t0, do_trap_insn_fault
jr t0
END(sifive_cip_453_insn_fault_trp)
SYM_FUNC_END(sifive_cip_453_insn_fault_trp)
@@ -91,10 +91,8 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_NUMA
int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */
......
@@ -222,44 +222,44 @@ static __always_inline int variable_fls(unsigned int x)
#define __NOT(x) (~(x))
/**
* test_and_set_bit - Set a bit and return its old value
* arch_test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation may be reordered on other architectures than x86.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* arch_test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation can be reordered on other architectures other than x86.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
/**
* test_and_change_bit - Change a bit and return its old value
* arch_test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
/**
* set_bit - Atomically set a bit in memory
* arch_set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -270,13 +270,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(int nr, volatile unsigned long *addr)
static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
/**
* clear_bit - Clears a bit in memory
* arch_clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
@@ -284,13 +284,13 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
/**
* change_bit - Toggle a bit in memory
* arch_change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
@@ -298,40 +298,40 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
static inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
static inline int test_and_set_bit_lock(
static inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
* arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
static inline void clear_bit_unlock(
static inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
* arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -345,13 +345,13 @@ static inline void clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
static inline void __clear_bit_unlock(
static inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
clear_bit_unlock(nr, addr);
arch_clear_bit_unlock(nr, addr);
}
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
@@ -369,6 +369,9 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask,
#undef __NOT
#undef __AMO
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
......
@@ -46,7 +46,23 @@ do { \
} while (0)
#ifdef CONFIG_64BIT
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
extern char _end[];
#define flush_cache_vmap flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
if (is_vmalloc_or_module_addr((void *)start)) {
int i;
/*
* We don't care if concurrently a cpu resets this value since
* the only place this can happen is in handle_exception() where
* an sfence.vma is emitted.
*/
for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
new_vmalloc[i] = -1ULL;
}
}
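The consumer of this bitmap is the new_vmalloc_check macro added to entry.S further down. As a rough C rendering of what that assembly does on a fault at a kernel address (illustrative only; the real check has to run before the kernel stack is known to be usable, hence the hand-written asm):

static bool new_vmalloc_pending(unsigned int cpu)
{
	u64 *word = &new_vmalloc[cpu / 64];	/* the asm computes (cpu >> 6) << 3 as a byte offset */
	u64 mask = BIT_ULL(cpu % 64);

	if (!(READ_ONCE(*word) & mask))
		return false;		/* genuine fault, fall through to handle_exception */

	*word &= ~mask;			/* the asm clears the bit atomically with amoxor.d */
	return true;			/* retry the access, preceded by sfence.vma when !Svvptc */
}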
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_EXEC_H
#define __ASM_EXEC_H
extern unsigned long arch_align_stack(unsigned long sp);
#endif /* __ASM_EXEC_H */
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
......
@@ -92,6 +92,7 @@
#define RISCV_ISA_EXT_ZCF 83
#define RISCV_ISA_EXT_ZCMOP 84
#define RISCV_ISA_EXT_ZAWRS 85
#define RISCV_ISA_EXT_SVVPTC 86
#define RISCV_ISA_EXT_XLINUXENVCFG 127
......
@@ -14,6 +14,11 @@
#define INVALID_CONTEXT UINT_MAX
#ifdef CONFIG_SMP
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
struct fwnode_handle *riscv_get_intc_hwnode(void);
......
@@ -112,11 +112,13 @@ struct kernel_mapping {
/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
unsigned long va_kernel_xip_text_pa_offset;
unsigned long va_kernel_xip_data_pa_offset;
uintptr_t xiprom;
uintptr_t xiprom_sz;
#else
unsigned long va_kernel_pa_offset;
#endif
};
@@ -134,12 +136,18 @@ extern phys_addr_t phys_ram_base;
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif
#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y) ({ \
unsigned long _y = (unsigned long)(y); \
(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
(void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
(void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
(_y < phys_ram_base) ? \
(void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
(void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
})
#else
#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
#endif
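In other words, for XIP the physical address itself selects the mapping: anything below phys_ram_base is execute-in-place text still in flash and takes the text offset, anything at or above it is writable data that was copied to RAM and takes the data offset. Splitting the single old va_kernel_xip_pa_offset (plus the fixed 32MB XIP_OFFSET) into a text and a data offset is what removes the old size limit; with hypothetical values, a flash text address of 0x2120_0000 goes through va_kernel_xip_text_pa_offset while a RAM data page at 0x8020_0000 goes through va_kernel_xip_data_pa_offset.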
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#ifndef CONFIG_DEBUG_VIRTUAL
@@ -147,12 +155,17 @@ void *linear_mapping_pa_to_va(unsigned long x);
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif
#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({ \
unsigned long _y = (unsigned long)(y); \
(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
(_y - kernel_map.va_kernel_xip_pa_offset) : \
(_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
(_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
(_y - kernel_map.va_kernel_xip_text_pa_offset) : \
(_y - kernel_map.va_kernel_xip_data_pa_offset); \
})
#else
#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
#endif
#define __va_to_pa_nodebug(x) ({ \
unsigned long _x = x; \
......
@@ -107,13 +107,6 @@
#endif
#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_32M
#define XIP_OFFSET_MASK (SZ_32M - 1)
#else
#define XIP_OFFSET 0
#endif
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -142,11 +135,14 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
extern char _sdata[], _start[], _end[]; \
uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
+ (uintptr_t)&_sdata - (uintptr_t)&_start; \
uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
+ (uintptr_t)&_end - (uintptr_t)&_start; \
uintptr_t __a = (uintptr_t)(addr); \
(__a >= CONFIG_XIP_PHYS_ADDR && \
__a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
__a; \
(__a >= __rom_start_data && __a < __rom_end_data) ? \
__a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
})
#else
#define XIP_FIXUP(addr) (addr)
@@ -501,6 +497,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
: : : : svvptc);
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
@@ -510,6 +509,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
svvptc:;
/*
* Svvptc guarantees that the new valid pte will be visible within
* a bounded timeframe, so when the uarch does not cache invalid
* entries, we don't have to do anything.
*/
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
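A note on the fast path above, since the control flow is easy to misread: when Svvptc is present the asm goto jumps straight to the svvptc label and the per-page local_flush_tlb_page() loop is skipped entirely. The guarantee being relied on can be sketched as (illustrative pseudo-code, not kernel code):

	set_pte(ptep, pte);	/* invalid -> valid transition */
	/* no sfence.vma: a stale walker may raise a spurious page fault, */
	/* but the handler sees the now-valid PTE, returns, and the       */
	/* retried access succeeds within the bounded window Svvptc       */
	/* ratifies.                                                       */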
......
@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
#define RISCV_PLAT_FW_EVENT 0xFFFF
/** General pmu event codes specified in SBI PMU extension */
enum sbi_pmu_hw_generic_events_t {
......
@@ -46,7 +46,7 @@ bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_STRICT_KERNEL_RWX
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL)
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
......
@@ -7,7 +7,7 @@
#ifdef CONFIG_64BIT
#define MAX_PHYSMEM_BITS 56
#else
#define MAX_PHYSMEM_BITS 34
#define MAX_PHYSMEM_BITS 32
#endif /* CONFIG_64BIT */
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
......
@@ -19,6 +19,7 @@ extern asmlinkage void *__memcpy(void *, const void *, size_t);
extern asmlinkage void *memmove(void *, const void *, size_t);
extern asmlinkage void *__memmove(void *, const void *, size_t);
#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
#define __HAVE_ARCH_STRCMP
extern asmlinkage int strcmp(const char *cs, const char *ct);
@@ -27,6 +28,7 @@ extern asmlinkage __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNCMP
extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
#endif
/* For those files which don't want to check by kasan. */
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
......
@@ -61,6 +61,13 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
#ifdef CONFIG_64BIT
/*
* Used in handle_exception() to save a0, a1 and a2 before knowing if we
* can access the kernel stack.
*/
unsigned long a0, a1, a2;
#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
@@ -112,8 +119,4 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
#endif /* _ASM_RISCV_THREAD_INFO_H */
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
......
@@ -9,18 +9,36 @@
#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
REG_L t0, _xip_fixup
/* Fix-up address in Flash into address in RAM early during boot before
* MMU is up. Because generated code "thinks" data is in Flash, but it
* is actually in RAM (actually data is also in Flash, but Flash is
* read-only, thus we need to use the data residing in RAM).
*
* The start of data in Flash is _sdata and the start of data in RAM is
* CONFIG_PHYS_RAM_BASE. So this fix-up essentially does this:
* reg += CONFIG_PHYS_RAM_BASE - _start
*/
li t0, CONFIG_PHYS_RAM_BASE
add \reg, \reg, t0
la t0, _sdata
sub \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
/* In linker script, at the transition from read-only section to
* writable section, the VMA is increased while LMA remains the same.
* (See in linker script how _sdata, __data_loc and LOAD_OFFSET is
* changed)
*
* Consequently, early during boot before MMU is up, the generated code
* reads the "writable" section at wrong addresses, because VMA is used
* by compiler to generate code, but the data is located in Flash using
* LMA.
*/
la t0, _sdata
sub \reg, \reg, t0
la t0, __data_loc
REG_L t1, _xip_phys_offset
sub \reg, \reg, t1
add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
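To make the reworked fix-up concrete with purely hypothetical numbers: if _sdata links at 0x2120_0000 in flash and CONFIG_PHYS_RAM_BASE is 0x8000_0000, XIP_FIXUP_OFFSET turns a pointer 0x2120_1000 into 0x2120_1000 + 0x8000_0000 - 0x2120_0000 = 0x8000_1000, i.e. the same object inside the RAM copy of the writable data; XIP_FIXUP_FLASH_OFFSET applies the reverse VMA-to-LMA adjustment via _sdata and __data_loc. Neither macro depends on the removed _xip_fixup/_xip_phys_offset constants or the fixed 32MB XIP_OFFSET any more.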
#else
.macro XIP_FIXUP_OFFSET reg
.endm
......
@@ -30,7 +30,7 @@
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
int __init acpi_numa_get_nid(unsigned int cpu)
static int __init acpi_numa_get_nid(unsigned int cpu)
{
return acpi_early_node_map[cpu];
}
......
@@ -36,6 +36,8 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
@@ -43,6 +45,11 @@ void asm_offsets(void)
#ifdef CONFIG_SHADOW_CALL_STACK
OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
#ifdef CONFIG_64BIT
OFFSET(TASK_TI_A0, task_struct, thread_info.a0);
OFFSET(TASK_TI_A1, task_struct, thread_info.a1);
OFFSET(TASK_TI_A2, task_struct, thread_info.a2);
#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
......
@@ -71,6 +71,11 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->type = type;
}
int init_cache_level(unsigned int cpu)
{
return init_of_cache_level(cpu);
}
int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
......
@@ -381,6 +381,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
__RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
......
@@ -451,6 +451,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
*(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
ENCODE_CJTYPE_IMM(val - addr);
break;
case R_RISCV_ADD16:
*(u16 *)loc += val;
break;
case R_RISCV_SUB16:
*(u16 *)loc -= val;
break;
case R_RISCV_ADD32:
*(u32 *)loc += val;
break;
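These show up in pairs because the purgatory objects use the usual label-difference idiom: for a 16-bit record such as .half b - a, the toolchain emits R_RISCV_ADD16 against b and R_RISCV_SUB16 against a at the same location (the labels here are hypothetical), so conceptually:

	*(u16 *)loc += val_b;	/* R_RISCV_ADD16, val_b = resolved address of b */
	*(u16 *)loc -= val_a;	/* R_RISCV_SUB16, val_a = resolved address of a */
	/* the slot ends up holding (u16)(b - a) */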
......
@@ -19,6 +19,79 @@
.section .irqentry.text, "ax"
.macro new_vmalloc_check
REG_S a0, TASK_TI_A0(tp)
csrr a0, CSR_CAUSE
/* Exclude IRQs */
blt a0, zero, _new_vmalloc_restore_context_a0
REG_S a1, TASK_TI_A1(tp)
/* Only check new_vmalloc if we are in page/protection fault */
li a1, EXC_LOAD_PAGE_FAULT
beq a0, a1, _new_vmalloc_kernel_address
li a1, EXC_STORE_PAGE_FAULT
beq a0, a1, _new_vmalloc_kernel_address
li a1, EXC_INST_PAGE_FAULT
bne a0, a1, _new_vmalloc_restore_context_a1
_new_vmalloc_kernel_address:
/* Is it a kernel address? */
csrr a0, CSR_TVAL
bge a0, zero, _new_vmalloc_restore_context_a1
/* Check if a new vmalloc mapping appeared that could explain the trap */
REG_S a2, TASK_TI_A2(tp)
/*
* Computes:
* a0 = &new_vmalloc[BIT_WORD(cpu)]
* a1 = BIT_MASK(cpu)
*/
REG_L a2, TASK_TI_CPU(tp)
/*
* Compute the new_vmalloc element position:
* (cpu / 64) * 8 = (cpu >> 6) << 3
*/
srli a1, a2, 6
slli a1, a1, 3
la a0, new_vmalloc
add a0, a0, a1
/*
* Compute the bit position in the new_vmalloc element:
* bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
* = cpu - ((cpu >> 6) << 3) << 3
*/
slli a1, a1, 3
sub a1, a2, a1
/* Compute the "get mask": 1 << bit_pos */
li a2, 1
sll a1, a2, a1
/* Check the value of new_vmalloc for this cpu */
REG_L a2, 0(a0)
and a2, a2, a1
beq a2, zero, _new_vmalloc_restore_context
/* Atomically reset the current cpu bit in new_vmalloc */
amoxor.d a0, a1, (a0)
/* Only emit a sfence.vma if the uarch caches invalid entries */
ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)
REG_L a0, TASK_TI_A0(tp)
REG_L a1, TASK_TI_A1(tp)
REG_L a2, TASK_TI_A2(tp)
csrw CSR_SCRATCH, x0
sret
_new_vmalloc_restore_context:
REG_L a2, TASK_TI_A2(tp)
_new_vmalloc_restore_context_a1:
REG_L a1, TASK_TI_A1(tp)
_new_vmalloc_restore_context_a0:
REG_L a0, TASK_TI_A0(tp)
.endm
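Working the index arithmetic through for a hypothetical cpu = 70: 70 >> 6 = 1, then 1 << 3 = 8, so a0 points at the second u64 of new_vmalloc (byte offset 8); the bit position is 70 - (1 << 6) = 6, so a1 ends up holding 1 << 6. This is the assembly form of word = cpu / 64, mask = 1ULL << (cpu % 64) used by the writer in flush_cache_vmap().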
SYM_CODE_START(handle_exception)
/*
* If coming from userspace, preserve the user thread pointer and load
@@ -30,6 +103,20 @@ SYM_CODE_START(handle_exception)
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
#ifdef CONFIG_64BIT
/*
* The RISC-V kernel does not eagerly emit a sfence.vma after each
* new vmalloc mapping, which may result in exceptions:
* - if the uarch caches invalid entries, the new mapping would not be
* observed by the page table walker and an invalidation is needed.
* - if the uarch does not cache invalid entries, a reordered access
* could "miss" the new mapping and traps: in that case, we only need
* to retry the access, no sfence.vma is required.
*/
new_vmalloc_check
#endif
REG_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
@@ -239,8 +326,8 @@ SYM_CODE_START(ret_from_fork)
jalr s0
1:
move a0, sp /* pt_regs */
la ra, ret_from_exception
tail syscall_exit_to_user_mode
call syscall_exit_to_user_mode
j ret_from_exception
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
......
@@ -787,8 +787,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
struct hlist_head *relocation_hashtable;
struct list_head used_buckets_list;
unsigned int hashtable_bits;
LIST_HEAD(used_buckets_list);
hashtable_bits = initialize_relocation_hashtable(num_relocations,
&relocation_hashtable);
@@ -796,8 +796,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
if (!relocation_hashtable)
return -ENOMEM;
INIT_LIST_HEAD(&used_buckets_list);
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
......
@@ -6,37 +6,9 @@
#include <asm/stacktrace.h>
/*
static bool fill_callchain(void *entry, unsigned long pc)
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
unsigned long fp, unsigned long reg_ra)
{
struct stackframe buftail;
return perf_callchain_store(entry, pc) == 0;
unsigned long ra = 0;
unsigned long __user *user_frame_tail =
(unsigned long __user *)(fp - sizeof(struct stackframe));
/* Check accessibility of one struct frame_tail beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
return 0;
if (__copy_from_user_inatomic(&buftail, user_frame_tail,
sizeof(buftail)))
return 0;
if (reg_ra != 0)
ra = reg_ra;
else
ra = buftail.ra;
fp = buftail.fp;
if (ra != 0)
perf_callchain_store(entry, ra);
else
return 0;
return fp;
}
/*
@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long fp = 0;
arch_stack_walk_user(fill_callchain, entry, regs);
fp = regs->s0;
perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
fp = user_backtrace(entry, fp, 0);
}
static bool fill_callchain(void *entry, unsigned long pc)
{
return perf_callchain_store(entry, pc) == 0;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
......
@@ -5,6 +5,7 @@ KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
-Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-include $(srctree)/include/linux/hidden.h \
-D__DISABLE_EXPORTS -ffreestanding \
-fno-asynchronous-unwind-tables -fno-unwind-tables \
$(call cc-option,-fno-addrsig)
@@ -16,6 +17,7 @@ KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
CFLAGS_fdt_early.o += -D__NO_FORTIFY
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
@@ -32,5 +34,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o archrandom_early.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/csr.h>
#include <linux/processor.h>
#include "pi.h"
/*
* To avoid rewriting code include asm/archrandom.h and create macros
* for the functions that won't be included.
*/
#undef riscv_has_extension_unlikely
#define riscv_has_extension_likely(...) false
#undef pr_err_once
#define pr_err_once(...)
#include <asm/archrandom.h>
u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa)
{
unsigned long seed = 0;
if (!fdt_early_match_extension_isa((const void *)dtb_pa, "zkr"))
return 0;
if (!csr_seed_long(&seed))
return 0;
return seed;
}
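csr_seed_long() comes from asm/archrandom.h, included above after stubbing out the pieces that cannot run this early. In rough outline -- a simplified sketch, not the actual helper -- pulling entropy out of the Zkr seed CSR looks like:

/* The seed CSR must be accessed with a read-write (csrrw); per the scalar  */
/* crypto spec, bits [31:30] hold OPST and bits [15:0] hold the entropy.    */
static bool zkr_read_once(unsigned long *out)
{
	unsigned long v = csr_swap(CSR_SEED, 0);

	if (((v >> 30) & 0x3) != 0x2)	/* anything other than ES16 yields no bits */
		return false;
	*out = v & 0xffff;
	return true;
}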
@@ -6,15 +6,9 @@
#include <asm/pgtable.h>
#include <asm/setup.h>
static char early_cmdline[COMMAND_LINE_SIZE];
/*
#include "pi.h"
static char early_cmdline[COMMAND_LINE_SIZE];
* Declare the functions that are exported (but prefixed) here so that LLVM
* does not complain it lacks the 'static' keyword (which, if added, makes
* LLVM complain because the function is actually unused in this file).
*/
u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
static char *get_early_cmdline(uintptr_t dtb_pa)
{
......
@@ -2,13 +2,9 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/ctype.h>
/*
#include "pi.h"
* Declare the functions that are exported (but prefixed) here so that LLVM
* does not complain it lacks the 'static' keyword (which, if added, makes
* LLVM complain because the function is actually unused in this file).
*/
u64 get_kaslr_seed(uintptr_t dtb_pa);
u64 get_kaslr_seed(uintptr_t dtb_pa)
{
@@ -28,3 +24,162 @@ u64 get_kaslr_seed(uintptr_t dtb_pa)
*prop = 0;
return ret;
}
/**
* fdt_device_is_available - check if a device is available for use
*
* @fdt: pointer to the device tree blob
* @node: offset of the node whose property to find
*
* Returns true if the status property is absent or set to "okay" or "ok",
* false otherwise
*/
static bool fdt_device_is_available(const void *fdt, int node)
{
const char *status;
int statlen;
status = fdt_getprop(fdt, node, "status", &statlen);
if (!status)
return true;
if (statlen > 0) {
if (!strcmp(status, "okay") || !strcmp(status, "ok"))
return true;
}
return false;
}
/* Copy of fdt_nodename_eq_ */
static int fdt_node_name_eq(const void *fdt, int offset,
const char *s)
{
int olen;
int len = strlen(s);
const char *p = fdt_get_name(fdt, offset, &olen);
if (!p || olen < len)
/* short match */
return 0;
if (memcmp(p, s, len) != 0)
return 0;
if (p[len] == '\0')
return 1;
else if (!memchr(s, '@', len) && (p[len] == '@'))
return 1;
else
return 0;
}
/**
* isa_string_contains - check if isa string contains an extension
*
* @isa_str: isa string to search
* @ext_name: the extension to search for
*
* Returns true if the extension is in the given isa string,
* false otherwise
*/
static bool isa_string_contains(const char *isa_str, const char *ext_name)
{
size_t i, single_end, len = strlen(ext_name);
char ext_end;
/* Error must contain rv32/64 */
if (strlen(isa_str) < 4)
return false;
if (len == 1) {
single_end = strcspn(isa_str, "sSxXzZ");
/* Search for single chars between rv32/64 and multi-letter extensions */
for (i = 4; i < single_end; i++) {
if (tolower(isa_str[i]) == ext_name[0])
return true;
}
return false;
}
/* Skip to start of multi-letter extensions */
isa_str = strpbrk(isa_str, "sSxXzZ");
while (isa_str) {
if (strncasecmp(isa_str, ext_name, len) == 0) {
ext_end = isa_str[len];
/* Check if matches the whole extension. */
if (ext_end == '\0' || ext_end == '_')
return true;
}
/* Multi-letter extensions must be split from other multi-letter
* extensions with an "_", the end of a multi-letter extension will
* either be the null character or the "_" at the start of the next
* multi-letter extension.
*/
isa_str = strchr(isa_str, '_');
if (isa_str)
isa_str++;
}
return false;
}
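A few hypothetical inputs make the matching rules above concrete:

	isa_string_contains("rv64imac_zicboz_zkr", "zkr");	/* true: exact multi-letter match */
	isa_string_contains("rv64imac_zkrb", "zkr");		/* false: "zkrb" is a longer, different name */
	isa_string_contains("rv64imafdc", "c");			/* true: single letters are only searched before the first s/S/x/X/z/Z */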
/**
* early_cpu_isa_ext_available - check if cpu node has an extension
*
* @fdt: pointer to the device tree blob
* @node: offset of the cpu node
* @ext_name: the extension to search for
*
* Returns true if the cpu node has the extension,
* false otherwise
*/
static bool early_cpu_isa_ext_available(const void *fdt, int node, const char *ext_name)
{
const void *prop;
int len;
prop = fdt_getprop(fdt, node, "riscv,isa-extensions", &len);
if (prop && fdt_stringlist_contains(prop, len, ext_name))
return true;
prop = fdt_getprop(fdt, node, "riscv,isa", &len);
if (prop && isa_string_contains(prop, ext_name))
return true;
return false;
}
/**
* fdt_early_match_extension_isa - check if all cpu nodes have an extension
*
* @fdt: pointer to the device tree blob
* @ext_name: the extension to search for
*
* Returns true if the all available the cpu nodes have the extension,
* false otherwise
*/
bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name)
{
int node, parent;
bool ret = false;
parent = fdt_path_offset(fdt, "/cpus");
if (parent < 0)
return false;
fdt_for_each_subnode(node, fdt, parent) {
if (!fdt_node_name_eq(fdt, node, "cpu"))
continue;
if (!fdt_device_is_available(fdt, node))
continue;
if (!early_cpu_isa_ext_available(fdt, node, ext_name))
return false;
ret = true;
}
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RISCV_PI_H_
#define _RISCV_PI_H_
#include <linux/types.h>
/*
* The following functions are exported (but prefixed). Declare them here so
* that LLVM does not complain it lacks the 'static' keyword (which, if
* added, makes LLVM complain because the function is unused).
*/
u64 get_kaslr_seed(uintptr_t dtb_pa);
u64 get_kaslr_seed_zkr(const uintptr_t dtb_pa);
bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
bool fdt_early_match_extension_isa(const void *fdt, const char *ext_name);
#endif /* _RISCV_PI_H_ */
@@ -15,6 +15,7 @@
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <asm/unistd.h>
#include <asm/processor.h>
@@ -26,6 +27,7 @@
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
#include <asm/exec.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
@@ -99,6 +101,13 @@ void show_regs(struct pt_regs *regs)
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
return sp & ~0xf;
}
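So with randomization enabled the initial user stack simply moves down by a random sub-page amount and is re-aligned to 16 bytes: hypothetically, sp = 0x7fff_ffff_f000 with a draw of 0x123 becomes 0x7fff_ffff_eed0. Setting the ADDR_NO_RANDOMIZE personality or randomize_va_space=0 disables it; this is essentially the same arch_align_stack() helper other architectures already provide.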
#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;
......
@@ -12,9 +12,6 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
@@ -21,6 +22,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/nmi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,6 +35,8 @@ enum ipi_message_type {
IPI_CPU_CRASH_STOP,
IPI_IRQ_WORK,
IPI_TIMER,
IPI_CPU_BACKTRACE,
IPI_KGDB_ROUNDUP,
IPI_MAX
};
@@ -113,6 +117,7 @@ void arch_irq_work_raise(void)
static irqreturn_t handle_IPI(int irq, void *data)
{
unsigned int cpu = smp_processor_id();
int ipi = irq - ipi_virq_base;
switch (ipi) {
@@ -126,7 +131,7 @@ static irqreturn_t handle_IPI(int irq, void *data)
ipi_stop();
break;
case IPI_CPU_CRASH_STOP:
ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
ipi_cpu_crash_stop(cpu, get_irq_regs());
break;
case IPI_IRQ_WORK:
irq_work_run();
@@ -136,8 +141,14 @@
tick_receive_broadcast();
break;
#endif
case IPI_CPU_BACKTRACE:
nmi_cpu_backtrace(get_irq_regs());
break;
case IPI_KGDB_ROUNDUP:
kgdb_nmicallback(cpu, get_irq_regs());
break;
default:
pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
pr_warn("CPU%d: unhandled IPI%d\n", cpu, ipi);
break;
}
@@ -203,6 +214,8 @@ static const char * const ipi_names[] = {
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
[IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
@@ -323,3 +336,29 @@ void arch_smp_send_reschedule(int cpu)
send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
static void riscv_backtrace_ipi(cpumask_t *mask)
{
send_ipi_mask(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi);
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
int this_cpu = raw_smp_processor_id();
int cpu;
for_each_online_cpu(cpu) {
/* No need to roundup ourselves */
if (cpu == this_cpu)
continue;
send_ipi_single(cpu, IPI_KGDB_ROUNDUP);
}
}
#endif
@@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
{
walk_stackframe(task, regs, consume_entry, cookie);
}
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
void *cookie, unsigned long fp,
unsigned long reg_ra)
{
struct stackframe buftail;
unsigned long ra = 0;
unsigned long __user *user_frame_tail =
(unsigned long __user *)(fp - sizeof(struct stackframe));
/* Check accessibility of one struct frame_tail beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
return 0;
if (__copy_from_user_inatomic(&buftail, user_frame_tail,
sizeof(buftail)))
return 0;
ra = reg_ra ? : buftail.ra;
fp = buftail.fp;
if (!ra || !consume_entry(cookie, ra))
return 0;
return fp;
}
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs)
{
unsigned long fp = 0;
fp = regs->s0;
if (!consume_entry(cookie, regs->epc))
return;
fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
while (fp && !(fp & 0x7))
fp = unwind_user_frame(consume_entry, cookie, fp, 0);
}
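For reference, the walk relies on the RISC-V frame-pointer convention in which the saved {fp, ra} pair sits immediately below s0/fp; struct stackframe in asm/stacktrace.h is just:

struct stackframe {
	unsigned long fp;
	unsigned long ra;
};

which is why unwind_user_frame() copies sizeof(struct stackframe) bytes from fp - 16, and why the loop now insists on 8-byte-aligned frame pointers (the !(fp & 0x7) check, tightened from the 0x3 used by the old perf_callchain.c walker).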
@@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
LDFLAGS_vdso.so.dbg = -shared -S -soname=linux-vdso.so.1 \
LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \
--build-id=sha1 --hash-style=both --eh-frame-hdr
# strip rule for the .so file
......
@@ -8,7 +8,7 @@
#include <linux/types.h>
/* All Andes vendor extensions supported in Linux */
const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
static const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = {
__RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU),
};
......
@@ -19,6 +19,13 @@ void arch_crash_save_vmcoreinfo(void)
#endif
#endif
vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
#ifdef CONFIG_XIP_KERNEL
/* TODO: Communicate with crash-utility developers on the information to
* export. The XIP case is more complicated, because the virtual-physical
* address offset depends on whether the address is in ROM or in RAM.
*/
#else
vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n", vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
kernel_map.va_kernel_pa_offset); kernel_map.va_kernel_pa_offset);
#endif
} }
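The #else branch keeps exporting a single va_kernel_pa_offset for non-XIP kernels; for XIP there is no single delta, which is what the TODO above is about. A sketch of the two-offset translation this series moves to (illustrative only, not the in-tree helper; the kernel_map fields are the ones introduced in the mm/init.c hunk further down):
/* Sketch: text executes in place from ROM, writable data is copied to RAM,
 * so virtual-to-physical translation needs two different deltas. */
static unsigned long xip_kernel_va_to_pa_sketch(unsigned long va)
{
	uintptr_t data_start = kernel_map.virt_addr +
			       ((uintptr_t)&_sdata - (uintptr_t)&_start);

	if (va < data_start)	/* ROM-resident text/rodata */
		return va - kernel_map.va_kernel_xip_text_pa_offset;

	return va - kernel_map.va_kernel_xip_data_pa_offset;	/* RAM data */
}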
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/set_memory.h>
OUTPUT_ARCH(riscv) OUTPUT_ARCH(riscv)
ENTRY(_start) ENTRY(_start)
...@@ -65,10 +66,10 @@ SECTIONS ...@@ -65,10 +66,10 @@ SECTIONS
* From this point, stuff is considered writable and will be copied to RAM * From this point, stuff is considered writable and will be copied to RAM
*/ */
__data_loc = ALIGN(PAGE_SIZE); /* location in file */ __data_loc = ALIGN(PAGE_SIZE); /* location in file */
. = KERNEL_LINK_ADDR + XIP_OFFSET; /* location in memory */ . = ALIGN(SECTION_ALIGN); /* location in memory */
#undef LOAD_OFFSET #undef LOAD_OFFSET
#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK)) #define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc)
_sdata = .; /* Start of data section */ _sdata = .; /* Start of data section */
_data = .; _data = .;
......
...@@ -3,9 +3,11 @@ lib-y += delay.o ...@@ -3,9 +3,11 @@ lib-y += delay.o
lib-y += memcpy.o lib-y += memcpy.o
lib-y += memset.o lib-y += memset.o
lib-y += memmove.o lib-y += memmove.o
ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
lib-y += strcmp.o lib-y += strcmp.o
lib-y += strlen.o lib-y += strlen.o
lib-y += strncmp.o lib-y += strncmp.o
endif
lib-y += csum.o lib-y += csum.o
ifeq ($(CONFIG_MMU), y) ifeq ($(CONFIG_MMU), y)
lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
......
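The assembly string routines are left out of KASAN builds because KASAN can only check memory accesses the compiler emits; dropping them lets the generic, instrumented C implementations take over, so every string access gets a shadow-memory check. A minimal illustration of why the C form is checkable (a sketch, not the generic lib implementation):
/* Every dereference below is a compiler-visible load, so KASAN can insert
 * a shadow check in front of it; a hand-written assembly loop gets none. */
static int strcmp_sketch(const char *a, const char *b)
{
	while (*a && *a == *b) {
		a++;
		b++;
	}
	return *(const unsigned char *)a - *(const unsigned char *)b;
}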
...@@ -111,3 +111,5 @@ SYM_FUNC_START(__memset) ...@@ -111,3 +111,5 @@ SYM_FUNC_START(__memset)
ret ret
SYM_FUNC_END(__memset) SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset) SYM_FUNC_ALIAS_WEAK(memset, __memset)
SYM_FUNC_ALIAS(__pi_memset, __memset)
SYM_FUNC_ALIAS(__pi___memset, __memset)
...@@ -120,3 +120,5 @@ strcmp_zbb: ...@@ -120,3 +120,5 @@ strcmp_zbb:
.option pop .option pop
#endif #endif
SYM_FUNC_END(strcmp) SYM_FUNC_END(strcmp)
SYM_FUNC_ALIAS(__pi_strcmp, strcmp)
EXPORT_SYMBOL(strcmp)
...@@ -131,3 +131,4 @@ strlen_zbb: ...@@ -131,3 +131,4 @@ strlen_zbb:
#endif #endif
SYM_FUNC_END(strlen) SYM_FUNC_END(strlen)
SYM_FUNC_ALIAS(__pi_strlen, strlen) SYM_FUNC_ALIAS(__pi_strlen, strlen)
EXPORT_SYMBOL(strlen)
...@@ -136,3 +136,5 @@ strncmp_zbb: ...@@ -136,3 +136,5 @@ strncmp_zbb:
.option pop .option pop
#endif #endif
SYM_FUNC_END(strncmp) SYM_FUNC_END(strncmp)
SYM_FUNC_ALIAS(__pi_strncmp, strncmp)
EXPORT_SYMBOL(strncmp)
...@@ -37,6 +37,8 @@ ...@@ -37,6 +37,8 @@
#include "../kernel/head.h" #include "../kernel/head.h"
u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
struct kernel_mapping kernel_map __ro_after_init; struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map); EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL #ifdef CONFIG_XIP_KERNEL
...@@ -917,7 +919,7 @@ static void __init relocate_kernel(void) ...@@ -917,7 +919,7 @@ static void __init relocate_kernel(void)
static void __init create_kernel_page_table(pgd_t *pgdir, static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early) __always_unused bool early)
{ {
uintptr_t va, end_va; uintptr_t va, start_va, end_va;
/* Map the flash resident part */ /* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz; end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
...@@ -927,10 +929,11 @@ static void __init create_kernel_page_table(pgd_t *pgdir, ...@@ -927,10 +929,11 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
PMD_SIZE, PAGE_KERNEL_EXEC); PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */ /* Map the data in RAM */
start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start;
end_va = kernel_map.virt_addr + kernel_map.size; end_va = kernel_map.virt_addr + kernel_map.size;
for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE) for (va = start_va; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va, create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), kernel_map.phys_addr + (va - start_va),
PMD_SIZE, PAGE_KERNEL); PMD_SIZE, PAGE_KERNEL);
} }
#else #else
...@@ -1048,6 +1051,7 @@ static void __init pt_ops_set_late(void) ...@@ -1048,6 +1051,7 @@ static void __init pt_ops_set_late(void)
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa); extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa); extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed_zkr(const uintptr_t dtb_pa);
static int __init print_nokaslr(char *p) static int __init print_nokaslr(char *p)
{ {
...@@ -1068,10 +1072,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) ...@@ -1068,10 +1072,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) { if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa); u64 kaslr_seed = __pi_get_kaslr_seed_zkr(dtb_pa);
u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
u32 nr_pos; u32 nr_pos;
if (kaslr_seed == 0)
kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
/* /*
* Compute the number of positions available: we are limited * Compute the number of positions available: we are limited
* by the early page table that only has one PUD and we must * by the early page table that only has one PUD and we must
...@@ -1098,11 +1104,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) ...@@ -1098,11 +1104,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
+ (uintptr_t)&_sdata - (uintptr_t)&_start;
#else #else
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL); kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start); kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
#endif #endif
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
...@@ -1124,15 +1133,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) ...@@ -1124,15 +1133,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
*/ */
kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ? kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
0UL : PAGE_OFFSET - kernel_map.phys_addr; 0UL : PAGE_OFFSET - kernel_map.phys_addr;
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
/* memory_limit = KERN_VIRT_SIZE;
* The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
* kernel, whereas for 64-bit kernel, the end of the virtual address
* space is occupied by the modules/BPF/kernel mappings which reduces
* the available size of the linear mapping.
*/
memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
/* Sanity check alignment and size */ /* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
......
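The KASLR hunk above now asks __pi_get_kaslr_seed_zkr() for entropy first and only falls back to the device-tree seed when it returns zero. A hedged sketch of what a Zkr-based read can look like (the function name, retry policy and the assumption that seed-CSR access has been delegated to S-mode are mine, not the in-tree code; only the CSR semantics come from the Zkr spec):
/* The Zkr "seed" CSR (0x015) must be accessed with a read-write instruction,
 * and only the ES16 state (bits 31:30 == 0b10) delivers 16 valid entropy
 * bits in the low half of the result. */
static u64 get_kaslr_seed_zkr_sketch(void)
{
	u64 seed = 0;
	int collected = 0, retries = 1000;

	while (collected < 4 && retries--) {
		unsigned long v;

		asm volatile("csrrw %0, 0x015, zero" : "=r" (v) : : "memory");

		if (((v >> 30) & 3) != 2)	/* not ES16, try again */
			continue;

		seed = (seed << 16) | (v & 0xffff);
		collected++;
	}

	return collected == 4 ? seed : 0;
}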
...@@ -9,6 +9,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -9,6 +9,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned long address, pte_t *ptep,
pte_t entry, int dirty) pte_t entry, int dirty)
{ {
asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
: : : : svvptc);
if (!pte_same(ptep_get(ptep), entry)) if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(vma->vm_mm, ptep, entry); __set_pte_at(vma->vm_mm, ptep, entry);
/* /*
...@@ -16,6 +19,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -16,6 +19,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* the case that the PTE changed and the spurious fault case. * the case that the PTE changed and the spurious fault case.
*/ */
return true; return true;
svvptc:
if (!pte_same(ptep_get(ptep), entry)) {
__set_pte_at(vma->vm_mm, ptep, entry);
/* Only the non-Svadu case needs this flush */
flush_tlb_page(vma, address);
return true;
}
return false;
} }
int ptep_test_and_clear_young(struct vm_area_struct *vma, int ptep_test_and_clear_young(struct vm_area_struct *vma,
......
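The asm goto above is patched at boot: on cores that advertise Svvptc the nop becomes a jump to the svvptc label, so selecting the path costs nothing per call. Svvptc makes guarantees about how address-translation caches treat invalid entries, so newly valid mappings become visible without a preceding sfence.vma; with that, this function can distinguish a genuine PTE update (write it and flush just that page, which the comment notes only matters without Svadu) from a spurious fault (return false, nothing to do) instead of unconditionally returning true. The equivalent control flow written as plain C for clarity (a sketch; cpu_has_svvptc() is an assumed helper, the two branches mirror the hunk):
static inline bool cpu_has_svvptc(void);	/* assumed detection helper */

int ptep_set_access_flags_sketch(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty)
{
	if (!cpu_has_svvptc()) {
		if (!pte_same(ptep_get(ptep), entry))
			__set_pte_at(vma->vm_mm, ptep, entry);
		return true;	/* spurious-fault handling covers the rest */
	}

	if (!pte_same(ptep_get(ptep), entry)) {
		__set_pte_at(vma->vm_mm, ptep, entry);
		flush_tlb_page(vma, address);	/* needed only without Svadu */
		return true;
	}
	return false;
}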
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o purgatory-y := purgatory.o sha256.o entry.o string.o ctype.o memcpy.o memset.o
ifeq ($(CONFIG_KASAN_GENERIC)$(CONFIG_KASAN_SW_TAGS),)
purgatory-y += strcmp.o strlen.o strncmp.o purgatory-y += strcmp.o strlen.o strncmp.o
endif
targets += $(purgatory-y) targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
......
...@@ -39,7 +39,6 @@ void arch_perf_update_userpage(struct perf_event *event, ...@@ -39,7 +39,6 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_short = 0; userpg->cap_user_time_short = 0;
userpg->cap_user_rdpmc = riscv_perf_user_access(event); userpg->cap_user_rdpmc = riscv_perf_user_access(event);
#ifdef CONFIG_RISCV_PMU
/* /*
* The counters are 64-bit but the priv spec doesn't mandate all the * The counters are 64-bit but the priv spec doesn't mandate all the
* bits to be implemented: that's why, counter width can vary based on * bits to be implemented: that's why, counter width can vary based on
...@@ -47,7 +46,6 @@ void arch_perf_update_userpage(struct perf_event *event, ...@@ -47,7 +46,6 @@ void arch_perf_update_userpage(struct perf_event *event,
*/ */
if (userpg->cap_user_rdpmc) if (userpg->cap_user_rdpmc)
userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1; userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
#endif
do { do {
rd = sched_clock_read_begin(&seq); rd = sched_clock_read_begin(&seq);
......
...@@ -60,7 +60,7 @@ asm volatile(ALTERNATIVE( \ ...@@ -60,7 +60,7 @@ asm volatile(ALTERNATIVE( \
#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY) #define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
PMU_FORMAT_ATTR(event, "config:0-47"); PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63"); PMU_FORMAT_ATTR(firmware, "config:62-63");
static bool sbi_v2_available; static bool sbi_v2_available;
static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available); static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
...@@ -507,7 +507,6 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) ...@@ -507,7 +507,6 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{ {
u32 type = event->attr.type; u32 type = event->attr.type;
u64 config = event->attr.config; u64 config = event->attr.config;
int bSoftware;
u64 raw_config_val; u64 raw_config_val;
int ret; int ret;
...@@ -528,18 +527,32 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) ...@@ -528,18 +527,32 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
break; break;
case PERF_TYPE_RAW: case PERF_TYPE_RAW:
/* /*
* As per SBI specification, the upper 16 bits must be unused for * As per SBI specification, the upper 16 bits must be unused
* a raw event. Use the MSB (63b) to distinguish between hardware * for a raw event.
* raw event and firmware events. * Bits 63:62 are used to distinguish between raw events
* 00 - Hardware raw event
* 10 - SBI firmware event
* 11 - RISC-V platform-specific firmware event
*/ */
bSoftware = config >> 63;
raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK; raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
if (bSoftware) { switch (config >> 62) {
case 0:
ret = RISCV_PMU_RAW_EVENT_IDX;
*econfig = raw_config_val;
break;
case 2:
ret = (raw_config_val & 0xFFFF) | ret = (raw_config_val & 0xFFFF) |
(SBI_PMU_EVENT_TYPE_FW << 16); (SBI_PMU_EVENT_TYPE_FW << 16);
} else { break;
ret = RISCV_PMU_RAW_EVENT_IDX; case 3:
/*
* For RISC-V platform-specific firmware events
* Event code - 0xFFFF
* Event data - raw event encoding
*/
ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
*econfig = raw_config_val; *econfig = raw_config_val;
break;
} }
break; break;
default: default:
......
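Put together with the PMU_FORMAT_ATTR change above (event = config[47:0], firmware = config[63:62]), the raw config layout decoded by this switch can be summarized as below; the macro and function names are illustrative, only the bit positions come from the driver:
#include <stdint.h>

#define PMU_RAW_CLASS_HW	0ULL	/* 00: hardware raw event      */
#define PMU_RAW_CLASS_SBI_FW	2ULL	/* 10: SBI firmware event      */
#define PMU_RAW_CLASS_PLAT_FW	3ULL	/* 11: platform firmware event */

/* Build a perf raw config value: event code in bits 47:0, class in 63:62. */
static inline uint64_t pmu_raw_config(uint64_t cls, uint64_t event)
{
	return (cls << 62) | (event & ((1ULL << 48) - 1));
}
With perf's format strings this corresponds to something like <pmu>/event=0x...,firmware=0x3/ for a platform firmware event (the event value and PMU instance name here are placeholders).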
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copied from the kernel sources to tools/arch/riscv:
*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2013 Regents of the University of California
* Copyright (C) 2017 SiFive
*/
#ifndef _TOOLS_LINUX_ASM_RISCV_BARRIER_H
#define _TOOLS_LINUX_ASM_RISCV_BARRIER_H
#include <asm/fence.h>
#include <linux/compiler.h>
/* These barriers need to enforce ordering on both devices and memory. */
#define mb() RISCV_FENCE(iorw, iorw)
#define rmb() RISCV_FENCE(ir, ir)
#define wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
#define smp_mb() RISCV_FENCE(rw, rw)
#define smp_rmb() RISCV_FENCE(r, r)
#define smp_wmb() RISCV_FENCE(w, w)
#define smp_store_release(p, v) \
do { \
RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
RISCV_FENCE(r, rw); \
___p1; \
})
#endif /* _TOOLS_LINUX_ASM_RISCV_BARRIER_H */
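A minimal usage sketch of the acquire/release pair defined above, mirroring the perf ring-buffer pattern the later hunks switch to (the struct and function names are illustrative, not part of the commit):
struct ring {
	unsigned long head;	/* producer-written index */
	char data[4096];
};

static inline void ring_publish(struct ring *r, unsigned long new_head)
{
	/* all stores to r->data issued before this call are visible to a
	 * consumer that reads new_head with smp_load_acquire() */
	smp_store_release(&r->head, new_head);
}

static inline unsigned long ring_observe(struct ring *r)
{
	/* later reads of r->data cannot be reordered before this load */
	return smp_load_acquire(&r->head);
}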
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copied from the kernel sources to tools/arch/riscv:
*/
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
#define RISCV_FENCE(p, s) \
({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
#endif /* _ASM_RISCV_FENCE_H */
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#include "../../arch/arm64/include/asm/barrier.h" #include "../../arch/arm64/include/asm/barrier.h"
#elif defined(__powerpc__) #elif defined(__powerpc__)
#include "../../arch/powerpc/include/asm/barrier.h" #include "../../arch/powerpc/include/asm/barrier.h"
#elif defined(__riscv)
#include "../../arch/riscv/include/asm/barrier.h"
#elif defined(__s390__) #elif defined(__s390__)
#include "../../arch/s390/include/asm/barrier.h" #include "../../arch/s390/include/asm/barrier.h"
#elif defined(__sh__) #elif defined(__sh__)
......
...@@ -55,7 +55,7 @@ static inline u64 ring_buffer_read_head(struct perf_event_mmap_page *base) ...@@ -55,7 +55,7 @@ static inline u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
* READ_ONCE() + smp_mb() pair. * READ_ONCE() + smp_mb() pair.
*/ */
#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \ #if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \
defined(__ia64__) || defined(__sparc__) && defined(__arch64__) defined(__ia64__) || defined(__sparc__) && defined(__arch64__) || defined(__riscv)
return smp_load_acquire(&base->data_head); return smp_load_acquire(&base->data_head);
#else #else
u64 head = READ_ONCE(base->data_head); u64 head = READ_ONCE(base->data_head);
......