Commit 6ad3df56 authored by Enze Li's avatar Enze Li Committed by Huacai Chen

LoongArch: Add KFENCE (Kernel Electric-Fence) support

The LoongArch architecture is quite different from other architectures.
When the KFENCE pool itself is allocated, it is placed in the direct
mapping configuration window [1] by default on LoongArch.  This means it
cannot use the page-table-mapped mode that the KFENCE system requires,
and therefore the pool must be remapped to the appropriate region.

This patch adds architecture specific implementation details for KFENCE.
In particular, this implements the required interface in <asm/kfence.h>.

Tested this patch by running the testcases and all passed.

[1] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#virtual-address-space-and-address-translation-mode

Signed-off-by: Enze Li <lienze@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 95bb5b61
...@@ -92,6 +92,7 @@ config LOONGARCH ...@@ -92,6 +92,7 @@ config LOONGARCH
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_SECCOMP_FILTER
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* KFENCE support for LoongArch.
*
* Author: Enze Li <lienze@kylinos.cn>
* Copyright (C) 2022-2023 KylinSoft Corporation.
*/
#ifndef _ASM_LOONGARCH_KFENCE_H
#define _ASM_LOONGARCH_KFENCE_H
#include <linux/kfence.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
static inline bool arch_kfence_init_pool(void)
{
int err;
char *kfence_pool = __kfence_pool;
struct vm_struct *area;
area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP,
KFENCE_AREA_START, KFENCE_AREA_END,
__builtin_return_address(0));
if (!area)
return false;
__kfence_pool = (char *)area->addr;
err = ioremap_page_range((unsigned long)__kfence_pool,
(unsigned long)__kfence_pool + KFENCE_POOL_SIZE,
virt_to_phys((void *)kfence_pool), PAGE_KERNEL);
if (err) {
free_vm_area(area);
__kfence_pool = kfence_pool;
return false;
}
return true;
}
/*
 * Toggle protection on the page containing @addr by clearing (protect)
 * or setting (unprotect) the VALID and PRESENT bits in its kernel PTE,
 * then flush the stale translation from the local TLB.
 *
 * Returns false if no PTE exists for @addr, true otherwise.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned long val;
	pte_t *ptep = virt_to_kpte(addr);

	if (WARN_ON(!ptep) || pte_none(*ptep))
		return false;

	val = pte_val(*ptep);
	if (protect)
		val &= ~(_PAGE_VALID | _PAGE_PRESENT);
	else
		val |= _PAGE_VALID | _PAGE_PRESENT;
	set_pte(ptep, __pte(val));

	/* Invalidate the cached translation on this CPU. */
	preempt_disable();
	local_flush_tlb_one(addr);
	preempt_enable();

	return true;
}
#endif /* _ASM_LOONGARCH_KFENCE_H */
...@@ -82,14 +82,23 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; ...@@ -82,14 +82,23 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) #define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M) #define MODULES_END (MODULES_VADDR + SZ_256M)
#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE 0
#endif
#define VMALLOC_START MODULES_END #define VMALLOC_START MODULES_END
#define VMALLOC_END \ #define VMALLOC_END \
(vm_map_base + \ (vm_map_base + \
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE) min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK)) #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1) #define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
#define pte_ERROR(e) \ #define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED #ifndef __PAGETABLE_PMD_FOLDED
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/kfence.h>
#include <asm/branch.h> #include <asm/branch.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
...@@ -30,7 +31,8 @@ ...@@ -30,7 +31,8 @@
int show_unhandled_signals = 1; int show_unhandled_signals = 1;
static void __kprobes no_context(struct pt_regs *regs, unsigned long address) static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{ {
const int field = sizeof(unsigned long) * 2; const int field = sizeof(unsigned long) * 2;
...@@ -38,6 +40,9 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address) ...@@ -38,6 +40,9 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
if (fixup_exception(regs)) if (fixup_exception(regs))
return; return;
if (kfence_handle_page_fault(address, write, regs))
return;
/* /*
* Oops. The kernel tried to access some bad page. We'll have to * Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice. * terminate things with extreme prejudice.
...@@ -51,14 +56,15 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address) ...@@ -51,14 +56,15 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
die("Oops", regs); die("Oops", regs);
} }
static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address) static void __kprobes do_out_of_memory(struct pt_regs *regs,
unsigned long write, unsigned long address)
{ {
/* /*
* We ran out of memory, call the OOM killer, and return the userspace * We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed). * (which will retry the fault, or kill us if we got oom-killed).
*/ */
if (!user_mode(regs)) { if (!user_mode(regs)) {
no_context(regs, address); no_context(regs, write, address);
return; return;
} }
pagefault_out_of_memory(); pagefault_out_of_memory();
...@@ -69,7 +75,7 @@ static void __kprobes do_sigbus(struct pt_regs *regs, ...@@ -69,7 +75,7 @@ static void __kprobes do_sigbus(struct pt_regs *regs,
{ {
/* Kernel mode? Handle exceptions or die */ /* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) { if (!user_mode(regs)) {
no_context(regs, address); no_context(regs, write, address);
return; return;
} }
...@@ -90,7 +96,7 @@ static void __kprobes do_sigsegv(struct pt_regs *regs, ...@@ -90,7 +96,7 @@ static void __kprobes do_sigsegv(struct pt_regs *regs,
/* Kernel mode? Handle exceptions or die */ /* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) { if (!user_mode(regs)) {
no_context(regs, address); no_context(regs, write, address);
return; return;
} }
...@@ -149,7 +155,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, ...@@ -149,7 +155,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
*/ */
if (address & __UA_LIMIT) { if (address & __UA_LIMIT) {
if (!user_mode(regs)) if (!user_mode(regs))
no_context(regs, address); no_context(regs, write, address);
else else
do_sigsegv(regs, write, address, si_code); do_sigsegv(regs, write, address, si_code);
return; return;
...@@ -211,7 +217,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, ...@@ -211,7 +217,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
if (fault_signal_pending(fault, regs)) { if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs)) if (!user_mode(regs))
no_context(regs, address); no_context(regs, write, address);
return; return;
} }
...@@ -232,7 +238,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, ...@@ -232,7 +238,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
if (unlikely(fault & VM_FAULT_ERROR)) { if (unlikely(fault & VM_FAULT_ERROR)) {
mmap_read_unlock(mm); mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) { if (fault & VM_FAULT_OOM) {
do_out_of_memory(regs, address); do_out_of_memory(regs, write, address);
return; return;
} else if (fault & VM_FAULT_SIGSEGV) { } else if (fault & VM_FAULT_SIGSEGV) {
do_sigsegv(regs, write, address, si_code); do_sigsegv(regs, write, address, si_code);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment