Commit e41ba111 authored by Sven Schnelle, committed by Heiko Carstens

s390: add support for KFENCE

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
[hca@linux.ibm.com: simplify/rework code]
Link: https://lore.kernel.org/r/20210728190254.3921642-4-hca@linux.ibm.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent f99e12b2
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -138,6 +138,7 @@ config S390
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_VMALLOC
+	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_SOFT_DIRTY
--- /dev/null
+++ b/arch/s390/include/asm/kfence.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_KFENCE_H
+#define _ASM_S390_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/kfence.h>
+#include <asm/set_memory.h>
+#include <asm/page.h>
+
+void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static __always_inline bool arch_kfence_init_pool(void)
+{
+	return true;
+}
+
+#define arch_kfence_test_address(addr)	((addr) & PAGE_MASK)
+
+/*
+ * Do not split the kfence pool into 4k mappings in arch_kfence_init_pool(),
+ * but earlier, while page table allocations still happen with memblock.
+ * The reason is that arch_kfence_init_pool() gets called when the system
+ * is still in a limbo state - disabling and enabling bottom halves is
+ * not yet allowed, but that is what our page_table_alloc() would do.
+ */
+static __always_inline void kfence_split_mapping(void)
+{
+#ifdef CONFIG_KFENCE
+	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
+
+	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
+#endif
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	__kernel_map_pages(virt_to_page(addr), 1, !protect);
+	return true;
+}
+
+#endif /* _ASM_S390_KFENCE_H */
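
For context (not part of the commit): with CONFIG_KFENCE=y, KFENCE samples heap allocations into its dedicated pool at a rate set by the kfence.sample_interval boot parameter, and the guard pages that kfence_protect_page() keeps unmapped turn stray accesses into immediate translation exceptions. A minimal sketch of the bugs this catches, assuming the allocation happens to be sampled into the KFENCE pool:

	char *buf = kmalloc(32, GFP_KERNEL);
	buf[32] = 'x';	/* one byte out of bounds: faults on the adjacent guard page */
	kfree(buf);
	buf[0] = 'y';	/* use-after-free: the freed object's page is protected again */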
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,6 +31,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
+#include <linux/kfence.h>
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
 #include <asm/gmap.h>
@@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	unsigned long address;
 	unsigned int flags;
 	vm_fault_t fault;
+	bool is_write;
 
 	tsk = current;
 	/*
@@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	mm = tsk->mm;
 	trans_exc_code = regs->int_parm_long;
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	is_write = (trans_exc_code & store_indication) == 0x400;
 
 	/*
 	 * Verify that the fault happened in user space, that
@@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	type = get_fault_type(regs);
 	switch (type) {
 	case KERNEL_FAULT:
+		if (kfence_handle_page_fault(address, is_write, regs))
+			return 0;
 		goto out;
 	case USER_FAULT:
 	case GMAP_FAULT:
@@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 		break;
 	}
 
-	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+	if (access == VM_WRITE || is_write)
 		flags |= FAULT_FLAG_WRITE;
 	mmap_read_lock(mm);
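
Why address and is_write move to the top of do_exception() (a reading of the diff, not commit text): kfence_handle_page_fault() is called in the KERNEL_FAULT case, which runs before the point where the fault address used to be extracted, and it must know whether the access was a read or a write. On s390 the store-indication bits of the translation-exception code encode this (0x400 flags a store), so the value is computed once up front and reused later for FAULT_FLAG_WRITE. In outline:

	/* sketch of the resulting flow, not the complete handler */
	address = trans_exc_code & __FAIL_ADDR_MASK;
	is_write = (trans_exc_code & store_indication) == 0x400;
	if (get_fault_type(regs) == KERNEL_FAULT &&
	    kfence_handle_page_fault(address, is_write, regs))
		return 0;	/* address was in the KFENCE pool; report emitted */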
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -34,6 +34,7 @@
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/ptdump.h>
 #include <asm/dma.h>
 #include <asm/lowcore.h>
@@ -200,7 +201,7 @@ void __init mem_init(void)
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 	pv_init();
+	kfence_split_mapping();
 	/* Setup guest page hinting */
 	cmma_init();
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -8,6 +8,7 @@
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/page.h>
 #include <asm/set_memory.h>
@@ -326,7 +327,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
 	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
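
The widened guard matters because kfence_protect_page() in the new header toggles guard pages through __kernel_map_pages(), and the s390 implementation of that helper lives inside this block and invalidates PTEs via ipte_range(), so it must now also be built when only KFENCE is enabled. Conceptually (a simplified sketch of the generic semantics, not the s390-specific code):

	/* protect: unmap the 4k page so any access faults */
	__kernel_map_pages(virt_to_page(addr), 1, 0);
	/* unprotect: make the page accessible again */
	__kernel_map_pages(virt_to_page(addr), 1, 1);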