Commit 982365a8 authored by Linus Torvalds

Merge tag 'riscv-for-linus-6.4-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull more RISC-V updates from Palmer Dabbelt:

 - Support for hibernation

 - The .rela.dyn section has been moved to the init area

 - A fix for the SBI probing to allow for implementation-defined
   behavior (a short caller-side sketch follows this list)

 - Various other fixes and cleanups throughout the tree
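
   For orientation, a minimal caller-side sketch of the SBI probing change
   (an editor's illustration, not code from this merge; sbi_hsm_available()
   is a made-up helper name):

	#include <linux/types.h>
	#include <asm/sbi.h>

	/* sbi_probe_extension() now returns the raw probe value: the SBI spec
	 * only guarantees that 0 means "not available" and that a present
	 * extension reports some implementation-defined nonzero value, so
	 * callers test for nonzero rather than "> 0". */
	static bool sbi_hsm_available(void)
	{
		return sbi_probe_extension(SBI_EXT_HSM) != 0;
	}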

* tag 'riscv-for-linus-6.4-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  RISC-V: include cpufeature.h in cpufeature.c
  riscv: Move .rela.dyn to the init sections
  dt-bindings: riscv: explicitly mention assumption of Zicsr & Zifencei support
  riscv: compat_syscall_table: Fixup compile warning
  RISC-V: fixup in-flight collision with ARCH_WANT_OPTIMIZE_VMEMMAP rename
  RISC-V: fix sifive and thead section mismatches in errata
  RISC-V: Align SBI probe implementation with spec
  riscv: mm: remove redundant parameter of create_fdt_early_page_table
  riscv: Adjust dependencies of HAVE_DYNAMIC_FTRACE selection
  RISC-V: Add arch functions to support hibernation/suspend-to-disk
  RISC-V: mm: Enable huge page support to kernel_page_present() function
  RISC-V: Factor out common code of __cpu_resume_enter()
  RISC-V: Change suspend_save_csrs and suspend_restore_csrs to public function
parents 493804a6 c2d3c844
@@ -86,6 +86,12 @@ properties:
       User-Level ISA document, available from
       https://riscv.org/specifications/
 
+      Due to revisions of the ISA specification, some deviations
+      have arisen over time.
+      Notably, riscv,isa was defined prior to the creation of the
+      Zicsr and Zifencei extensions and thus "i" implies
+      "zicsr_zifencei".
+
       While the isa strings in ISA specification are case
       insensitive, letters in the riscv,isa string must be all
       lowercase to simplify parsing.
@@ -47,16 +47,16 @@ config RISCV
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
-	select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
 	select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL
+	select ARCH_WANT_OPTIMIZE_VMEMMAP
 	select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
 	select BUILDTIME_TABLE_SORT if MMU
 	select CLINT_TIMER if !MMU
 	select CLONE_BACKWARDS
 	select COMMON_CLK
-	select CPU_PM if CPU_IDLE
+	select CPU_PM if CPU_IDLE || HIBERNATION
 	select EDAC_SUPPORT
 	select GENERIC_ARCH_TOPOLOGY
 	select GENERIC_ATOMIC64 if !64BIT
@@ -142,12 +142,23 @@ config RISCV
 	select TRACE_IRQFLAGS_SUPPORT
 	select UACCESS_MEMCPY if !MMU
 	select ZONE_DMA32 if 64BIT
-	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
+	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
 
+config CLANG_SUPPORTS_DYNAMIC_FTRACE
+	def_bool CC_IS_CLANG
+	# https://github.com/llvm/llvm-project/commit/6ab8927931851bb42b2c93a00801dc499d7d9b1e
+	depends on CLANG_VERSION >= 130000
+	# https://github.com/ClangBuiltLinux/linux/issues/1817
+	depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+
+config GCC_SUPPORTS_DYNAMIC_FTRACE
+	def_bool CC_IS_GCC
+	depends on $(cc-option,-fpatchable-function-entry=8)
+
 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT
 	default 8
@@ -788,6 +799,12 @@ menu "Power management options"
 source "kernel/power/Kconfig"
 
+config ARCH_HIBERNATION_POSSIBLE
+	def_bool y
+
+config ARCH_HIBERNATION_HEADER
+	def_bool HIBERNATION
+
 endmenu # "Power management options"
 
 menu "CPU Power Management"
@@ -82,11 +82,9 @@ static void __init_or_module warn_miss_errata(u32 miss_errata)
 	pr_warn("----------------------------------------------------------------\n");
 }
 
-void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
-					       struct alt_entry *end,
-					       unsigned long archid,
-					       unsigned long impid,
-					       unsigned int stage)
+void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+			      unsigned long archid, unsigned long impid,
+			      unsigned int stage)
 {
 	struct alt_entry *alt;
 	u32 cpu_req_errata;
@@ -83,9 +83,9 @@ static u32 thead_errata_probe(unsigned int stage,
 	return cpu_req_errata;
 }
 
-void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
-					      unsigned long archid, unsigned long impid,
-					      unsigned int stage)
+void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+			     unsigned long archid, unsigned long impid,
+			     unsigned int stage)
 {
 	struct alt_entry *alt;
 	u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2023 StarFive Technology Co., Ltd.
*
* Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
*/
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>
/*
* suspend_restore_csrs - restore CSRs
*/
.macro suspend_restore_csrs
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrw CSR_EPC, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
csrw CSR_STATUS, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrw CSR_TVAL, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
csrw CSR_CAUSE, t0
.endm
/*
* suspend_restore_regs - Restore registers (except A0 and T0-T6)
*/
.macro suspend_restore_regs
REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
.endm
/*
* copy_page - copy 1 page (4KB) of data from source to destination
* @a0 - destination
* @a1 - source
*/
.macro copy_page a0, a1
lui a2, 0x1
add a2, a2, a0
1:
REG_L t0, 0(a1)
REG_L t1, SZREG(a1)
REG_S t0, 0(a0)
REG_S t1, SZREG(a0)
addi a0, a0, 2 * SZREG
addi a1, a1, 2 * SZREG
bne a2, a0, 1b
.endm
#endif /* __ASM_ASSEMBLER_H */
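
For orientation, a plain-C model of the copy_page macro above (an editor's sketch assuming a 4 KiB page and XLEN-sized words; copy_page_model() is not part of the patch):

	/* Editor's illustrative model (not kernel code): copy one 4 KiB page two
	 * register-sized words per iteration, mirroring the unrolled REG_L/REG_S
	 * pairs; the macro's "lui a2, 0x1; add a2, a2, a0" computes the end
	 * address dst + 0x1000 that the loop compares against. */
	static void copy_page_model(unsigned long *dst, const unsigned long *src)
	{
		unsigned long *end = (unsigned long *)((char *)dst + 0x1000);

		while (dst != end) {
			unsigned long w0 = src[0];	/* REG_L t0, 0(a1)        */
			unsigned long w1 = src[1];	/* REG_L t1, SZREG(a1)    */
			dst[0] = w0;			/* REG_S t0, 0(a0)        */
			dst[1] = w1;			/* REG_S t1, SZREG(a0)    */
			dst += 2;			/* addi a0, a0, 2 * SZREG */
			src += 2;			/* addi a1, a1, 2 * SZREG */
		}
	}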
@@ -295,7 +295,7 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
 			   unsigned long start,
 			   unsigned long size,
 			   unsigned long asid);
-int sbi_probe_extension(int ext);
+long sbi_probe_extension(int ext);
 
 /* Check if current SBI specification version is 0.1 or not */
 static inline int sbi_spec_is_0_1(void)
@@ -21,6 +21,11 @@ struct suspend_context {
 #endif
 };
 
+/*
+ * Used by hibernation core and cleared during resume sequence
+ */
+extern int in_suspend;
+
 /* Low-level CPU suspend entry function */
 int __cpu_suspend_enter(struct suspend_context *context);
 
@@ -33,4 +38,21 @@ int cpu_suspend(unsigned long arg,
 /* Low-level CPU resume entry function */
 int __cpu_resume_enter(unsigned long hartid, unsigned long context);
 
+/* Used to save and restore the CSRs */
+void suspend_save_csrs(struct suspend_context *context);
+void suspend_restore_csrs(struct suspend_context *context);
+
+/* Low-level API to support hibernation */
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+int __hibernate_cpu_resume(void);
+
+/* Used to resume on the CPU we hibernated on */
+int hibernate_resume_nonboot_cpu_disable(void);
+
+asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
+					unsigned long cpu_resume);
+asmlinkage int hibernate_core_restore_code(void);
 #endif
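
A usage note (an editor's sketch, not code from this merge): with suspend_save_csrs() and suspend_restore_csrs() made public by this header, hibernation code can bracket its own image save/restore with them; the helper names and the static context below are hypothetical.

	#include <asm/suspend.h>

	static struct suspend_context hibernate_csr_ctx;	/* hypothetical holder */

	static void example_save_cpu_csrs(void)
	{
		suspend_save_csrs(&hibernate_csr_ctx);		/* before the image is created */
	}

	static void example_restore_cpu_csrs(void)
	{
		suspend_restore_csrs(&hibernate_csr_ctx);	/* after the image is restored */
	}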
@@ -9,6 +9,7 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
 endif
 CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)
+CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
 
 ifdef CONFIG_KEXEC
 AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
@@ -64,6 +65,7 @@ obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_MODULE_SECTIONS)	+= module-sections.o
 
 obj-$(CONFIG_CPU_PM)		+= suspend_entry.o suspend.o
+obj-$(CONFIG_HIBERNATION)	+= hibernate.o hibernate-asm.o
 
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o
@@ -9,6 +9,7 @@
 #include <linux/kbuild.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/suspend.h>
 #include <asm/kvm_host.h>
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
@@ -116,6 +117,10 @@ void asm_offsets(void)
 
 	OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
 
+	OFFSET(HIBERN_PBE_ADDR, pbe, address);
+	OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
+	OFFSET(HIBERN_PBE_NEXT, pbe, next);
+
 	OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
 	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
 	OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
@@ -27,7 +27,7 @@ const struct cpu_operations cpu_ops_spinwait = {
 void __init cpu_set_ops(int cpuid)
 {
 #if IS_ENABLED(CONFIG_RISCV_SBI)
-	if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
+	if (sbi_probe_extension(SBI_EXT_HSM)) {
 		if (!cpuid)
 			pr_info("SBI HSM extension detected\n");
 		cpu_ops[cpuid] = &cpu_ops_sbi;
@@ -14,6 +14,7 @@
 #include <linux/of.h>
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/hwcap.h>
 #include <asm/patch.h>
 #include <asm/processor.h>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Hibernation low level support for RISCV.
*
* Copyright (C) 2023 StarFive Technology Co., Ltd.
*
* Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/csr.h>
#include <linux/linkage.h>
/*
* int __hibernate_cpu_resume(void)
* Switch back to the hibernated image's page table prior to restoring the CPU
* context.
*
* Always returns 0
*/
ENTRY(__hibernate_cpu_resume)
/* switch to hibernated image's page table. */
csrw CSR_SATP, s0
sfence.vma
REG_L a0, hibernate_cpu_context
suspend_restore_csrs
suspend_restore_regs
/* Return zero value. */
mv a0, zero
ret
END(__hibernate_cpu_resume)
/*
* Prepare to restore the image.
* a0: satp of saved page tables.
* a1: satp of temporary page tables.
* a2: cpu_resume.
*/
ENTRY(hibernate_restore_image)
mv s0, a0
mv s1, a1
mv s2, a2
REG_L s4, restore_pblist
REG_L a1, relocated_restore_code
jalr a1
END(hibernate_restore_image)
/*
* The below code will be executed from a 'safe' page.
* It first switches to the temporary page table, then starts to copy the pages
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
* to restore the CPU context.
*/
ENTRY(hibernate_core_restore_code)
/* switch to temp page table. */
csrw satp, s1
sfence.vma
.Lcopy:
/* The below code will restore the hibernated image. */
REG_L a1, HIBERN_PBE_ADDR(s4)
REG_L a0, HIBERN_PBE_ORIG(s4)
copy_page a0, a1
REG_L s4, HIBERN_PBE_NEXT(s4)
bnez s4, .Lcopy
jalr s2
END(hibernate_core_restore_code)
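
As plain-C orientation for the copy loop above (an editor's sketch, not part of the patch; restore_image_model() is a made-up name):

	#include <linux/string.h>
	#include <linux/suspend.h>
	#include <asm/page.h>

	/* Illustrative only: hibernate_core_restore_code walks the pbe list built
	 * by the hibernation core and copies every saved page back over its
	 * original location.  The real code runs from a "safe" page, in assembly,
	 * because it overwrites the kernel it is running in; this model shows just
	 * the data flow. */
	static void restore_image_model(void)
	{
		struct pbe *pbe;

		for (pbe = restore_pblist; pbe; pbe = pbe->next)
			memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);

		/* ...after which control jumps to __hibernate_cpu_resume(). */
	}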
(The diff of one additional file is collapsed and not shown here.)
@@ -524,19 +524,18 @@ static void sbi_srst_power_off(void)
  * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
  * @extid: The extension ID to be probed.
  *
- * Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
+ * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
  */
-int sbi_probe_extension(int extid)
+long sbi_probe_extension(int extid)
 {
 	struct sbiret ret;
 
 	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
 			0, 0, 0, 0, 0);
 	if (!ret.error)
-		if (ret.value)
-			return ret.value;
+		return ret.value;
 
-	return -ENOTSUPP;
+	return 0;
 }
 EXPORT_SYMBOL(sbi_probe_extension);
@@ -599,26 +598,26 @@ void __init sbi_init(void)
 	if (!sbi_spec_is_0_1()) {
 		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
 			sbi_get_firmware_id(), sbi_get_firmware_version());
-		if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
+		if (sbi_probe_extension(SBI_EXT_TIME)) {
 			__sbi_set_timer = __sbi_set_timer_v02;
 			pr_info("SBI TIME extension detected\n");
 		} else {
 			__sbi_set_timer = __sbi_set_timer_v01;
 		}
-		if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
+		if (sbi_probe_extension(SBI_EXT_IPI)) {
 			__sbi_send_ipi = __sbi_send_ipi_v02;
 			pr_info("SBI IPI extension detected\n");
 		} else {
 			__sbi_send_ipi = __sbi_send_ipi_v01;
 		}
-		if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
+		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
 			__sbi_rfence = __sbi_rfence_v02;
 			pr_info("SBI RFENCE extension detected\n");
 		} else {
 			__sbi_rfence = __sbi_rfence_v01;
 		}
 		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
-		    (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
+		    sbi_probe_extension(SBI_EXT_SRST)) {
 			pr_info("SBI SRST extension detected\n");
 			pm_power_off = sbi_srst_power_off;
 			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
@@ -8,7 +8,7 @@
 #include <asm/csr.h>
 #include <asm/suspend.h>
 
-static void suspend_save_csrs(struct suspend_context *context)
+void suspend_save_csrs(struct suspend_context *context)
 {
 	context->scratch = csr_read(CSR_SCRATCH);
 	context->tvec = csr_read(CSR_TVEC);
@@ -29,7 +29,7 @@ static void suspend_save_csrs(struct suspend_context *context)
 #endif
 }
 
-static void suspend_restore_csrs(struct suspend_context *context)
+void suspend_restore_csrs(struct suspend_context *context)
 {
 	csr_write(CSR_SCRATCH, context->scratch);
 	csr_write(CSR_TVEC, context->tvec);
@@ -7,6 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/assembler.h>
 #include <asm/csr.h>
 #include <asm/xip_fixup.h>
 
@@ -83,39 +84,10 @@ ENTRY(__cpu_resume_enter)
 	add	a0, a1, zero
 
 	/* Restore CSRs */
-	REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
-	csrw CSR_EPC, t0
-	REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
-	csrw CSR_STATUS, t0
-	REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
-	csrw CSR_TVAL, t0
-	REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
-	csrw CSR_CAUSE, t0
+	suspend_restore_csrs
 
 	/* Restore registers (except A0 and T0-T6) */
-	REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
-	REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
-	REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
-	REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
-	REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
-	REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
-	REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
-	REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
-	REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
-	REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
-	REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
-	REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
-	REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
-	REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
-	REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
-	REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
-	REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
-	REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
-	REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
-	REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
-	REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
-	REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
-	REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+	suspend_restore_regs
 
 	/* Return zero value */
 	add	a0, zero, zero
@@ -104,6 +104,12 @@ SECTIONS
 		*(.rel.dyn*)
 	}
 
+	.rela.dyn : ALIGN(8) {
+		__rela_dyn_start = .;
+		*(.rela .rela*)
+		__rela_dyn_end = .;
+	}
+
 	__init_data_end = .;
 
 	. = ALIGN(8);
@@ -130,12 +136,6 @@ SECTIONS
 		*(.sdata*)
 	}
 
-	.rela.dyn : ALIGN(8) {
-		__rela_dyn_start = .;
-		*(.rela .rela*)
-		__rela_dyn_end = .;
-	}
-
 	.got : { *(.got*) }
 
 #ifdef CONFIG_RELOCATABLE
@@ -80,7 +80,7 @@ static int __init riscv_kvm_init(void)
 		return -ENODEV;
 	}
 
-	if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
+	if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
 		kvm_info("require SBI RFENCE extension\n");
 		return -ENODEV;
 	}
@@ -919,8 +919,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
  * entry.
  */
-static void __init create_fdt_early_page_table(pgd_t *pgdir,
-					       uintptr_t fix_fdt_va,
+static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
 					       uintptr_t dtb_pa)
 {
 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
@@ -1132,8 +1131,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	create_kernel_page_table(early_pg_dir, true);
 
 	/* Setup early mapping for FDT early scan */
-	create_fdt_early_page_table(early_pg_dir,
-				    __fix_to_virt(FIX_FDT), dtb_pa);
+	create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
 
 	/*
 	 * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
@@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page)
 	pgd = pgd_offset_k(addr);
 	if (!pgd_present(*pgd))
 		return false;
+	if (pgd_leaf(*pgd))
+		return true;
 
 	p4d = p4d_offset(pgd, addr);
 	if (!p4d_present(*p4d))
 		return false;
+	if (p4d_leaf(*p4d))
+		return true;
 
 	pud = pud_offset(p4d, addr);
 	if (!pud_present(*pud))
 		return false;
+	if (pud_leaf(*pud))
+		return true;
 
 	pmd = pmd_offset(pud, addr);
 	if (!pmd_present(*pmd))
 		return false;
+	if (pmd_leaf(*pmd))
+		return true;
 
 	pte = pte_offset_kernel(pmd, addr);
 	return pte_present(*pte);
@@ -613,7 +613,7 @@ static int __init sbi_cpuidle_init(void)
 	 * 2) SBI HSM extension is available
 	 */
 	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
-	    sbi_probe_extension(SBI_EXT_HSM) <= 0) {
+	    !sbi_probe_extension(SBI_EXT_HSM)) {
 		pr_info("HSM suspend not available\n");
 		return 0;
 	}
@@ -924,7 +924,7 @@ static int __init pmu_sbi_devinit(void)
 	struct platform_device *pdev;
 
 	if (sbi_spec_version < sbi_mk_version(0, 3) ||
-	    sbi_probe_extension(SBI_EXT_PMU) <= 0) {
+	    !sbi_probe_extension(SBI_EXT_PMU)) {
 		return 0;
 	}