Commit a8f0c31f authored by Max Filippov

xtensa: noMMU: allow handling protection faults

Many xtensa CPU cores without full MMU still have memory protection
features capable of raising exceptions for invalid instruction fetches
and data accesses. Allow handling such exceptions. This improves the
behavior of processes that pass invalid memory pointers to syscalls in
noMMU configs: when such an exception is raised, the kernel can now
return -EINVAL from the syscall instead of killing the process.

Introduce CONFIG_PFAULT that controls whether protection fault code is
enabled and register handlers for common memory protection exceptions
when it is enabled.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 270a8306
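
The user-visible effect described in the commit message can be checked from
user space on a noMMU target built with CONFIG_PFAULT=y. The sketch below is
illustrative and not part of this commit: the choice of syscall and the
deliberately invalid pointer value are assumptions, and the exact error code
depends on the syscall's user-access checks.

/* Illustrative user-space check (not part of this commit): hand a
 * syscall an obviously bad buffer pointer.  With protection fault
 * handling enabled the call is expected to fail with an error code
 * instead of the process being killed.  The pointer value 0x4 is an
 * assumption; use an address that is invalid on the target. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char *bad = (char *)0x4;	/* assumed-invalid pointer */
	ssize_t n = read(STDIN_FILENO, bad, 16);

	if (n < 0)
		printf("read() failed gracefully: %s\n", strerror(errno));
	else
		printf("read() returned %zd bytes\n", n);
	return 0;
}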
arch/xtensa/Kconfig

@@ -79,6 +79,7 @@ config STACKTRACE_SUPPORT
 config MMU
 	def_bool n
+	select PFAULT

 config HAVE_XTENSA_GPIO32
 	def_bool n

@@ -178,6 +179,16 @@ config XTENSA_FAKE_NMI
 	  If unsure, say N.

+config PFAULT
+	bool "Handle protection faults" if EXPERT && !MMU
+	default y
+	help
+	  Handle protection faults. MMU configurations must enable it.
+	  noMMU configurations may disable it if used memory map never
+	  generates protection faults or faults are always fatal.
+
+	  If unsure, say Y.
+
 config XTENSA_UNALIGNED_USER
 	bool "Unaligned memory access in user space"
 	help
arch/xtensa/kernel/traps.c

@@ -110,21 +110,21 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
 { EXCCAUSE_UNALIGNED,			KRNL,	   fast_unaligned },
 #endif
 #ifdef CONFIG_MMU
 { EXCCAUSE_ITLB_MISS,			0,	   do_page_fault },
 { EXCCAUSE_ITLB_MISS,			USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS,			USER|KRNL, fast_second_level_miss},
+{ EXCCAUSE_DTLB_MISS,			0,	   do_page_fault },
+{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_PFAULT
 { EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
 { EXCCAUSE_ITLB_PRIVILEGE,		0,	   do_page_fault },
-/* EXCCAUSE_SIZE_RESTRICTION unhandled */
 { EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
-{ EXCCAUSE_DTLB_MISS,			USER|KRNL, fast_second_level_miss},
-{ EXCCAUSE_DTLB_MISS,			0,	   do_page_fault },
 { EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
 { EXCCAUSE_DTLB_PRIVILEGE,		0,	   do_page_fault },
-/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
-{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
 { EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
 { EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
-#endif /* CONFIG_MMU */
+#endif
 /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
 #if XTENSA_HAVE_COPROCESSOR(0)
 COPROCESSOR(0),
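
Each entry in dispatch_init_table[] pairs an exception cause with a handler,
with the middle field selecting fast user/kernel vectors (USER, KRNL) or the
default C path (0). As a stand-alone illustration of this table-driven
registration pattern, here is a minimal sketch; the struct layout, the cause
values and the demo_* names are hypothetical simplifications, not the
kernel's actual types or registration code.

#include <stdio.h>

/* Hypothetical, simplified model of a cause->handler dispatch table.
 * It only mirrors the shape of dispatch_init_table[]; the real kernel
 * installs handlers per CPU in architecture-specific structures. */
struct demo_dispatch {
	int cause;			/* exception cause code */
	unsigned int fast;		/* 0 selects the default C handler */
	void (*handler)(int cause);
};

static void demo_protection_fault(int cause)
{
	printf("protection fault handler invoked, cause %d\n", cause);
}

static const struct demo_dispatch demo_table[] = {
	{ 26, 0, demo_protection_fault },	/* placeholder cause code */
	{ 28, 0, demo_protection_fault },	/* placeholder cause code */
};

int main(void)
{
	/* Walk the table the way an init routine would when registering
	 * handlers, then invoke each handler to show the dispatch. */
	for (unsigned int i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); ++i)
		demo_table[i].handler(demo_table[i].cause);
	return 0;
}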
arch/xtensa/mm/Makefile

@@ -4,7 +4,8 @@
 #
 obj-y			:= init.o misc.o
-obj-$(CONFIG_MMU)	+= cache.o fault.o ioremap.o mmu.o tlb.o
+obj-$(CONFIG_PFAULT)	+= fault.o
+obj-$(CONFIG_MMU)	+= cache.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)	+= highmem.o
 obj-$(CONFIG_KASAN)	+= kasan_init.o
arch/xtensa/mm/fault.c

@@ -25,6 +25,7 @@ void bad_page_fault(struct pt_regs*, unsigned long, int);
 static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
 {
+#ifdef CONFIG_MMU
 	/* Synchronize this task's top level page-table
 	 * with the 'reference' page table.
 	 */

@@ -71,6 +72,9 @@ static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
 bad_page_fault:
 	bad_page_fault(regs, address, SIGKILL);
+#else
+	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
+#endif
 }

 /*
  * This routine handles page faults.  It determines the address,