Commit cac4d1dc authored by Pekka Enberg, committed by Palmer Dabbelt

riscv/mm/fault: Move no context handling to no_context()

This patch moves the no-context handling in do_page_fault() to a new
no_context() function and converts the gotos into calls to the new function.
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 43632871
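For illustration, the shape of the change is the classic goto-to-helper refactoring: the shared error path that used to sit under a no_context: label becomes a standalone function, and every former goto site calls that function and then returns early. Below is a minimal standalone C sketch of the same pattern (hypothetical names, not the kernel code itself):

#include <stdio.h>

/* Factored-out error path; plays the role no_context() plays in the patch. */
static void fail_path(int err)
{
	fprintf(stderr, "fatal: error %d\n", err);
}

static void do_work(int err)
{
	/*
	 * Before the refactoring this would read "if (err) goto fail;" with a
	 * "fail:" label at the bottom of the function. After it, each site
	 * calls the helper and returns early instead.
	 */
	if (err) {
		fail_path(err);
		return;
	}
	printf("ok\n");
}

int main(void)
{
	do_work(0);	/* normal path */
	do_work(1);	/* error path goes through the helper */
	return 0;
}

The explicit return after each call matters: the helper handles the error, but it does not end the caller, so without the return the caller would fall through into the code below the call site.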
@@ -19,6 +19,24 @@
 #include "../kernel/head.h"
 
+static inline void no_context(struct pt_regs *regs, unsigned long addr)
+{
+	/* Are we prepared to handle this kernel fault? */
+	if (fixup_exception(regs))
+		return;
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	bust_spinlocks(1);
+	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
+		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
+		"paging request", addr);
+	die(regs, "Oops");
+	do_exit(SIGKILL);
+}
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -59,8 +77,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * If we're in an interrupt, have no user context, or are running
 	 * in an atomic region, then we must not take the fault.
 	 */
-	if (unlikely(faulthandler_disabled() || !mm))
-		goto no_context;
+	if (unlikely(faulthandler_disabled() || !mm)) {
+		no_context(regs, addr);
+		return;
+	}
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
@@ -153,38 +173,29 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		return;
 	}
 
-no_context:
-	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs))
-		return;
-
-	/*
-	 * Oops. The kernel tried to access some bad page. We'll have to
-	 * terminate things with extreme prejudice.
-	 */
-	bust_spinlocks(1);
-	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
-		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
-		"paging request", addr);
-	die(regs, "Oops");
-	do_exit(SIGKILL);
+	no_context(regs, addr);
+	return;
 
 	/*
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
 out_of_memory:
 	mmap_read_unlock(mm);
-	if (!user_mode(regs))
-		goto no_context;
+	if (!user_mode(regs)) {
+		no_context(regs, addr);
+		return;
+	}
 	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
 	mmap_read_unlock(mm);
 	/* Kernel mode? Handle exceptions or die */
-	if (!user_mode(regs))
-		goto no_context;
+	if (!user_mode(regs)) {
+		no_context(regs, addr);
+		return;
+	}
 	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
 	return;
@@ -213,19 +224,25 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
 	pgd_k = init_mm.pgd + index;
-	if (!pgd_present(*pgd_k))
-		goto no_context;
+	if (!pgd_present(*pgd_k)) {
+		no_context(regs, addr);
+		return;
+	}
 	set_pgd(pgd, *pgd_k);
 
 	p4d = p4d_offset(pgd, addr);
 	p4d_k = p4d_offset(pgd_k, addr);
-	if (!p4d_present(*p4d_k))
-		goto no_context;
+	if (!p4d_present(*p4d_k)) {
+		no_context(regs, addr);
+		return;
+	}
 
 	pud = pud_offset(p4d, addr);
 	pud_k = pud_offset(p4d_k, addr);
-	if (!pud_present(*pud_k))
-		goto no_context;
+	if (!pud_present(*pud_k)) {
+		no_context(regs, addr);
+		return;
+	}
 
 	/*
 	 * Since the vmalloc area is global, it is unnecessary
@@ -233,8 +250,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 */
 	pmd = pmd_offset(pud, addr);
 	pmd_k = pmd_offset(pud_k, addr);
-	if (!pmd_present(*pmd_k))
-		goto no_context;
+	if (!pmd_present(*pmd_k)) {
+		no_context(regs, addr);
+		return;
+	}
 	set_pmd(pmd, *pmd_k);
 
 	/*
@@ -244,8 +263,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * silently loop forever.
 	 */
 	pte_k = pte_offset_kernel(pmd_k, addr);
-	if (!pte_present(*pte_k))
-		goto no_context;
+	if (!pte_present(*pte_k)) {
+		no_context(regs, addr);
+		return;
+	}
 
 	/*
 	 * The kernel assumes that TLBs don't cache invalid