Commit 53630a1f authored by Catalin Marinas

Merge branch 'for-next/misc' into for-next/core

* for-next/misc:
  : Miscellaneous patches
  arm64/kprobe: Optimize the performance of patching single-step slot
  ARM64: reloc_test: add __init/__exit annotations to module init/exit funcs
  arm64/mm: fold check for KFENCE into can_set_direct_map()
  arm64: uaccess: simplify uaccess_mask_ptr()
  arm64: mte: move register initialization to C
  arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()
  arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()
  arm64: support huge vmalloc mappings
  arm64: spectre: increase parameters that can be used to turn off bhb mitigation individually
  arm64: run softirqs on the per-CPU IRQ stack
  arm64: compat: Implement misalignment fixups for multiword loads
parents c704cf27 a0caebbd
@@ -3207,6 +3207,7 @@
spectre_v2_user=off [X86]
spec_store_bypass_disable=off [X86,PPC]
ssbd=force-off [ARM64]
nospectre_bhb [ARM64]
l1tf=off [X86]
mds=off [X86]
tsx_async_abort=off [X86]
@@ -3613,7 +3614,7 @@
nohugeiomap [KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings.
nohugevmalloc [PPC] Disable kernel huge vmalloc mappings.
nohugevmalloc [KNL,X86,PPC,ARM64] Disable kernel huge vmalloc mappings.
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1.
@@ -3631,6 +3632,10 @@
vulnerability. System may allow data leaks with this
option.
nospectre_bhb [ARM64] Disable all mitigations for Spectre-BHB (branch
history injection) vulnerability. System may allow data leaks
with this option.
nospec_store_bypass_disable
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
......
@@ -149,6 +149,7 @@ config ARM64
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_HUGE_VMALLOC
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -230,6 +231,7 @@ config ARM64
select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
select TRACE_IRQFLAGS_SUPPORT
select TRACE_IRQFLAGS_NMI_SUPPORT
select HAVE_SOFTIRQ_ON_OWN_STACK
help
ARM 64-bit (AArch64) Linux support.
@@ -1575,6 +1577,9 @@ config THUMB2_COMPAT_VDSO
Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
otherwise with '-marm'.
config COMPAT_ALIGNMENT_FIXUPS
bool "Fix up misaligned multi-word loads and stores in user space"
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on SYSCTL
......
@@ -71,6 +71,7 @@ void do_sysinstr(unsigned long esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
void do_cp15instr(unsigned long esr, struct pt_regs *regs);
int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
......
@@ -42,7 +42,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_cpu_setup(void);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -72,6 +74,9 @@ static inline void mte_thread_switch(struct task_struct *next)
static inline void mte_suspend_enter(void)
{
}
static inline void mte_suspend_exit(void)
{
}
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
return 0;
......
@@ -203,9 +203,11 @@ static inline void uaccess_enable_privileged(void)
}

/*
* Sanitise a uaccess pointer such that it becomes NULL if above the maximum
* user address. In case the pointer is tagged (has the top byte set), untag
* the pointer before checking.
*/
/*
* Sanitize a uaccess pointer such that it cannot reach any kernel address.
*
* Clearing bit 55 ensures the pointer cannot address any portion of the TTBR1
* address range (i.e. any kernel address), and either the pointer falls within
* the TTBR0 address range or must cause a fault.
*/
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -213,14 +215,12 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
void __user *safe_ptr;

asm volatile(
" bics xzr, %3, %2\n"
" csel %0, %1, xzr, eq\n"
: "=&r" (safe_ptr)
: "r" (ptr),
"r" (TASK_SIZE_MAX - 1),
"r" (untagged_addr(ptr))
: "cc");
csdb();

asm volatile(
" bic %0, %1, %2\n"
: "=r" (safe_ptr)
: "r" (ptr),
"i" (BIT(55))
);

return safe_ptr;
}
......
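The bit-55 trick used by the new __uaccess_mask_ptr() above can be illustrated outside the kernel: TTBR1 (kernel) addresses have bit 55 set while TTBR0 (user) addresses have it clear, so clearing that single bit can never yield a kernel address. Below is a minimal user-space sketch of the same arithmetic; the addresses and the mask_ptr() helper are made up for illustration and are not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Same masking idea as the new __uaccess_mask_ptr(), written as plain C:
 * clear bit 55 so the result can never be a TTBR1 (kernel) address. */
static uint64_t mask_ptr(uint64_t addr)
{
	return addr & ~(1ULL << 55);
}

int main(void)
{
	/* Example addresses, purely illustrative. */
	uint64_t user_ptr   = 0x0000aaaadeadbeefULL; /* bit 55 clear: left unchanged */
	uint64_t kernel_ptr = 0xffff800008001234ULL; /* bit 55 set: pushed out of the TTBR1 range */

	printf("user:   %#018llx -> %#018llx\n",
	       (unsigned long long)user_ptr, (unsigned long long)mask_ptr(user_ptr));
	printf("kernel: %#018llx -> %#018llx\n",
	       (unsigned long long)kernel_ptr, (unsigned long long)mask_ptr(kernel_ptr));
	return 0;
}

A user pointer passes through untouched; a kernel pointer loses bit 55 and can no longer be translated via TTBR1, so it either lands in the user range or faults, which is exactly the property the comment above relies on.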
@@ -45,6 +45,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
obj-$(CONFIG_COMPAT) += sigreturn32.o
obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
......
@@ -2043,7 +2043,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
isb();
mte_cpu_setup();
/*
* Clear the tags in the zero page. This needs to be done via the
......
@@ -21,7 +21,9 @@
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
#include <asm/exception.h>
#include <asm/vmap_stack.h>
#include <asm/softirq_stack.h>
/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
@@ -71,6 +73,18 @@ static void init_irq_stacks(void)
}
#endif
#ifndef CONFIG_PREEMPT_RT
static void ____do_softirq(struct pt_regs *regs)
{
__do_softirq();
}
void do_softirq_own_stack(void)
{
call_on_irq_stack(NULL, ____do_softirq);
}
#endif
static void default_handle_irq(struct pt_regs *regs)
{
panic("IRQ taken without a root IRQ handler\n");
......
@@ -285,6 +285,49 @@ void mte_thread_switch(struct task_struct *next)
mte_check_tfsr_el1();
}
void mte_cpu_setup(void)
{
u64 rgsr;
/*
* CnP must be enabled only after the MAIR_EL1 register has been set
* up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
* lead to the wrong memory type being used for a brief window during
* CPU power-up.
*
* CnP is not a boot feature so MTE gets enabled before CnP, but let's
* make sure that is the case.
*/
BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
/* Normal Tagged memory type at the corresponding MAIR index */
sysreg_clear_set(mair_el1,
MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
MT_NORMAL_TAGGED));
write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
/*
* If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
* RGSR_EL1.SEED must be non-zero for IRG to produce
* pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
* must initialize it.
*/
rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
SYS_RGSR_EL1_SEED_SHIFT;
if (rgsr == 0)
rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
write_sysreg_s(rgsr, SYS_RGSR_EL1);
/* clear any pending tag check faults in TFSR*_EL1 */
write_sysreg_s(0, SYS_TFSR_EL1);
write_sysreg_s(0, SYS_TFSRE0_EL1);
local_flush_tlb_all();
}
void mte_suspend_enter(void)
{
if (!system_supports_mte())
@@ -301,6 +344,14 @@ void mte_suspend_enter(void)
mte_check_tfsr_el1();
}
void mte_suspend_exit(void)
{
if (!system_supports_mte())
return;
mte_cpu_setup();
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
......
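The seed handling in mte_cpu_setup() above replaces the old ands/csinc/lsl sequence removed from __cpu_setup further down. Below is a minimal user-space sketch of the same derivation, assuming the SEED field is the 16-bit field at bit 8 as in the current arm64 sysreg definitions; RGSR_SEED_MASK, RGSR_SEED_SHIFT and derive_rgsr() are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for SYS_RGSR_EL1_SEED_MASK/_SHIFT (assumed here to
 * describe a 16-bit SEED field starting at bit 8). */
#define RGSR_SEED_MASK  0xffffULL
#define RGSR_SEED_SHIFT 8

/* Mirror of the logic in mte_cpu_setup(): derive a seed from a free-running
 * counter snapshot, but never let it be zero, since RGSR_EL1.SEED == 0 would
 * make IRG non-random when RRND=1 is implemented like RRND=0. */
static uint64_t derive_rgsr(uint64_t cntvct)
{
	uint64_t rgsr = (cntvct & RGSR_SEED_MASK) << RGSR_SEED_SHIFT;

	if (rgsr == 0)
		rgsr = 1ULL << RGSR_SEED_SHIFT;	/* same effect as the old csinc */
	return rgsr;
}

int main(void)
{
	/* A typical counter value, and one whose low 16 bits happen to be
	 * zero, which must still produce a non-zero seed. */
	printf("%#llx\n", (unsigned long long)derive_rgsr(0x123456789abcULL));
	printf("%#llx\n", (unsigned long long)derive_rgsr(0x555500010000ULL));
	return 0;
}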
@@ -44,13 +44,28 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
kprobe_opcode_t *addr = p->ainsn.api.insn;
void *addrs[] = {addr, addr + 1};
u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};

/* prepare insn slot */
aarch64_insn_patch_text(addrs, insns, 2);

flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));

/*
* Prepare insn slot. Mark Rutland points out that it depends on a couple
* of subtleties:
*
* - That the I-cache maintenance for these instructions is complete
*   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
*   ensures this, but just omits causing a Context-Synchronization-Event
*   on all CPUs).
*
* - That the kprobe BRK results in an exception (and consequently a
*   Context-Synchronization-Event), which ensures that the CPU will
*   fetch the single-step slot instructions *after* this, ensuring that
*   the new instructions are used.
*
* Normally an ISB would be placed after patching to guarantee that the
* I-cache maintenance is observed on all CPUs; however, the single-step
* slot is installed in the BRK exception handler, so it is unnecessary
* to generate a Context-Synchronization-Event via an ISB again.
*/
aarch64_insn_patch_text_nosync(addr, p->opcode);
aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);
/*
* Needs restoring of return address after stepping xol.
......
@@ -988,6 +988,14 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
isb();
}
static bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
__nospectre_bhb = true;
return 0;
}
early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
@@ -1001,7 +1009,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
/* No point mitigating Spectre-BHB alone. */
} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
} else if (cpu_mitigations_off()) {
} else if (cpu_mitigations_off() || __nospectre_bhb) {
pr_info_once("spectre-bhb mitigation disabled by command line option\n");
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED;
......
@@ -48,7 +48,7 @@ static struct {
{ "R_AARCH64_PREL16", relative_data16, (u64)&sym64_rel }, { "R_AARCH64_PREL16", relative_data16, (u64)&sym64_rel },
}; };
static int reloc_test_init(void)
static int __init reloc_test_init(void)
{
int i;
@@ -67,7 +67,7 @@ static int reloc_test_init(void)
return 0;
}

static void reloc_test_exit(void)
static void __exit reloc_test_exit(void)
{
}
......
@@ -43,6 +43,8 @@ void notrace __cpu_suspend_exit(void)
{
unsigned int cpu = smp_processor_id();
mte_suspend_exit();
/*
* We are resuming from reset with the idmap active in TTBR0_EL1.
* We must uninstall the idmap and restore the expected MMU
......
@@ -36,7 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
unsigned long start = (unsigned long)page_address(page);

dcache_clean_inval_poc(start, start + size);
dcache_clean_poc(start, start + size);
}
#ifdef CONFIG_IOMMU_DMA
......
@@ -691,6 +691,9 @@ static int __kprobes do_translation_fault(unsigned long far,
static int do_alignment_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
compat_user_mode(regs))
return do_compat_alignment_fixup(far, regs);
do_bad_area(far, esr, regs);
return 0;
}
......
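The fixup routine itself, do_compat_alignment_fixup(), lives in the new compat_alignment.c added via the Makefile hunk above; its diff is not reproduced here. Conceptually, a handler of this kind decodes the faulting AArch32 multiword load/store and performs the accesses in smaller, alignment-safe steps. Below is a rough user-space illustration of the byte-wise access idea only; load_u32_unaligned() is a made-up helper, not the kernel handler.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Rough illustration: a misaligned 32-bit load can be emulated with
 * byte-wise accesses, which is essentially what an alignment-fixup path
 * has to do for each register of a multiword load. */
static uint32_t load_u32_unaligned(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* byte-wise copy: no alignment requirement */
	return v;
}

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	/* buf + 1 is not 4-byte aligned; the emulated load still works. */
	printf("%#x\n", load_u32_unaligned(buf + 1));
	return 0;
}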
@@ -535,7 +535,7 @@ static void __init map_mem(pgd_t *pgdp)
*/
BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
if (can_set_direct_map())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

/*
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
return vmemmap_populate_basepages(start, end, node, altmap);
}
#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
pmd_t *pmdp;

WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
if (!ARM64_KERNEL_USES_PMD_MAPS)
return vmemmap_populate_basepages(start, end, node, altmap);
do {
next = pmd_addr_end(addr, end);
@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
@@ -1547,11 +1542,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
VM_BUG_ON(!mhp_range_allowed(start, size, true));

/*
* KFENCE requires linear map to be mapped at page granularity, so that
* it is possible to protect/unprotect single pages in the KFENCE pool.
*/
if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
if (can_set_direct_map())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
......
@@ -21,7 +21,13 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
return rodata_full || debug_pagealloc_enabled();

/*
* rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
* mapped at page granularity, so that it is possible to
* protect/unprotect single pages.
*/
return rodata_full || debug_pagealloc_enabled() ||
       IS_ENABLED(CONFIG_KFENCE);
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data) static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
......
@@ -48,17 +48,19 @@
#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
#elif defined(CONFIG_ARM64_MTE)
/*
* The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
* TBI being enabled at EL1.
*/
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS 0
#endif
/*
* Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
* changed during __cpu_setup to Normal Tagged if the system supports MTE.
* changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
*/
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
@@ -426,46 +428,8 @@ SYM_FUNC_START(__cpu_setup)
mov_q mair, MAIR_EL1_SET
mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
#ifdef CONFIG_ARM64_MTE
/*
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
* (ID_AA64PFR1_EL1[11:8] > 1).
*/
mrs x10, ID_AA64PFR1_EL1
ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2
b.lt 1f
/* Normal Tagged memory type at the corresponding MAIR index */
mov x10, #MAIR_ATTR_NORMAL_TAGGED
bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
mov x10, #KERNEL_GCR_EL1
msr_s SYS_GCR_EL1, x10
/*
* If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
* RGSR_EL1.SEED must be non-zero for IRG to produce
* pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
* must initialize it.
*/
mrs x10, CNTVCT_EL0
ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
csinc x10, x10, xzr, ne
lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
msr_s SYS_RGSR_EL1, x10
/* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
mov_q x10, TCR_MTE_FLAGS
orr tcr, tcr, x10
1:
#endif
tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
......