Commit 295b7366 authored by Hirokazu Takata, committed by Linus Torvalds

[PATCH] m32r: Cause SIGSEGV for nonexec page execution

- Cause a segmentation fault for illegal execution of code on a
  non-executable memory page.

Signed-off-by: Naoto Sugai <sugai@isl.melco.co.jp>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 2c7686ce
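For context, the behavior this patch enforces can be seen from user space. A minimal sketch, not part of this commit (the demo itself is illustrative): with the patch applied, jumping into a mapping created without PROT_EXEC should be killed with SIGSEGV on m32r rather than executed silently.

	/* Illustrative user-space demo, not from this commit. */
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		void (*fn)(void);
		void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (page == MAP_FAILED)
			return 1;
		/* Copy a few instruction bytes into the page; the permission
		 * check fires on the fetch itself, so the bytes never run. */
		memcpy(page, (void *)main, 64);
		fn = (void (*)(void))page;
		fn();	/* expected: SIGSEGV -- instruction fetch, no VM_EXEC */
		return 0;
	}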
@@ -96,6 +96,11 @@ void bust_spinlocks(int yes)
  * bit 2 == 0 means kernel, 1 means user-mode
  * bit 3 == 0 means data, 1 means instruction
  *======================================================================*/
+#define ACE_PROTECTION	1
+#define ACE_WRITE	2
+#define ACE_USERMODE	4
+#define ACE_INSTRUCTION	8
+
 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 			      unsigned long address)
 {
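The four new constants name the fault-status bits documented in the comment above (bit 0: protection, bit 1: write, bit 2: user mode, bit 3: instruction). As a sketch of how they compose, a hypothetical debugging helper (not in the patch; kernel snprintf() assumed) could decode error_code like this:

	/* Hypothetical helper, illustration only -- not in the patch. */
	static void ace_decode(unsigned long error_code, char *buf, size_t len)
	{
		snprintf(buf, len, "%s-mode %s %s fault, page %s",
			 (error_code & ACE_USERMODE)    ? "user"        : "kernel",
			 (error_code & ACE_INSTRUCTION) ? "instruction" : "data",
			 (error_code & ACE_WRITE)       ? "write"       : "read",
			 (error_code & ACE_PROTECTION)  ? "present"     : "not present");
	}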
@@ -126,10 +131,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * nothing more.
 	 *
 	 * This verifies that the fault happens in kernel space
-	 * (error_code & 4) == 0, and that the fault was not a
-	 * protection error (error_code & 1) == 0.
+	 * (error_code & ACE_USERMODE) == 0, and that the fault was not a
+	 * protection error (error_code & ACE_PROTECTION) == 0.
 	 */
-	if (address >= TASK_SIZE && !(error_code & 4))
+	if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
 		goto vmalloc_fault;
 
 	mm = tsk->mm;
@@ -157,7 +162,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * thus avoiding the deadlock.
 	 */
 	if (!down_read_trylock(&mm->mmap_sem)) {
-		if ((error_code & 4) == 0 &&
+		if ((error_code & ACE_USERMODE) == 0 &&
 		    !search_exception_tables(regs->psw))
 			goto bad_area_nosemaphore;
 		down_read(&mm->mmap_sem);
@@ -171,7 +176,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
 #if 0
-	if (error_code & 4) {
+	if (error_code & ACE_USERMODE) {
 		/*
 		 * accessing the stack below "spu" is always a bug.
 		 * The "+ 4" is there due to the push instruction
@@ -191,27 +196,33 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 good_area:
 	info.si_code = SEGV_ACCERR;
 	write = 0;
-	switch (error_code & 3) {
+	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
 		default:	/* 3: write, present */
 			/* fall through */
-		case 2:		/* write, not present */
+		case ACE_WRITE:	/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto bad_area;
 			write++;
 			break;
-		case 1:		/* read, present */
+		case ACE_PROTECTION:	/* read, present */
 		case 0:		/* read, not present */
 			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 				goto bad_area;
 	}
 
+	/*
+	 * For instruction access exception, check if the area is executable
+	 */
+	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
+		goto bad_area;
+
 survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	addr = (address & PAGE_MASK) | (error_code & 8);
+	addr = (address & PAGE_MASK) | (error_code & ACE_INSTRUCTION);
 	switch (handle_mm_fault(mm, vma, addr, write)) {
 		case VM_FAULT_MINOR:
 			tsk->min_flt++;
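The executable-area check added above is the core of the patch: an instruction-fetch fault (ACE_INSTRUCTION set) against a VMA lacking VM_EXEC now takes the bad_area path, which delivers SIGSEGV for user-mode accesses. Restated in isolation as a sketch (the helper itself is hypothetical; the names come from the patch):

	/* Hypothetical restatement of the new policy, not in the patch. */
	static inline int is_exec_violation(unsigned long error_code,
					    struct vm_area_struct *vma)
	{
		return (error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC);
	}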
@@ -239,7 +250,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & 4) {
+	if (error_code & ACE_USERMODE) {
 		tsk->thread.address = address;
 		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
 		tsk->thread.trap_no = 14;
@@ -295,7 +306,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		goto survive;
 	}
 	printk("VM: killing process %s\n", tsk->comm);
-	if (error_code & 4)
+	if (error_code & ACE_USERMODE)
 		do_exit(SIGKILL);
 	goto no_context;
 
@@ -303,7 +314,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	up_read(&mm->mmap_sem);
 
 	/* Kernel mode? Handle exception or die */
-	if (!(error_code & 4))
+	if (!(error_code & ACE_USERMODE))
 		goto no_context;
 
 	tsk->thread.address = address;
@@ -352,7 +363,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (!pte_present(*pte_k))
 		goto no_context;
 
-	addr = (address & PAGE_MASK) | (error_code & 8);
+	addr = (address & PAGE_MASK) | (error_code & ACE_INSTRUCTION);
 	update_mmu_cache(NULL, addr, *pte_k);
 	return;
 }
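Both this vmalloc_fault site and the good_area path above fold ACE_INSTRUCTION into the otherwise page-aligned address. Because ACE_INSTRUCTION is bit 3 and PAGE_MASK clears the offset bits, the flag survives the masking without disturbing the page-table walk; presumably the m32r update_mmu_cache() reads it back to distinguish instruction from data faults when refilling the TLB. A sketch of the encoding (helper names hypothetical):

	/* Hypothetical illustration of the encoding, not in the patch. */
	static inline unsigned long encode_fault_addr(unsigned long address,
						      unsigned long error_code)
	{
		/* bit 3 (ACE_INSTRUCTION) rides in the page-offset bits */
		return (address & PAGE_MASK) | (error_code & ACE_INSTRUCTION);
	}

	static inline int fault_was_ifetch(unsigned long addr)
	{
		return addr & ACE_INSTRUCTION;
	}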
@@ -148,8 +148,7 @@ extern unsigned long empty_zero_page[1024];
 	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
 		| _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_COPY \
-	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_USER \
-		| _PAGE_ACCESSED)
+	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_COPY_X \
 	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_USER \
 		| _PAGE_ACCESSED)
@@ -190,23 +189,23 @@ extern unsigned long empty_zero_page[1024];
  * the same are read. Also, write permissions imply read permissions.
  * This is the closest we can get..
  */
-	/* rwx */
+	/* xwr */
 #define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY_X
-#define __P010	PAGE_COPY_X
-#define __P011	PAGE_COPY_X
-#define __P100	PAGE_READONLY
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_READONLY_X
 #define __P101	PAGE_READONLY_X
 #define __P110	PAGE_COPY_X
 #define __P111	PAGE_COPY_X
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY_X
+#define __S001	PAGE_READONLY
 #define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED_X
-#define __S100	PAGE_READONLY
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY_X
 #define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED
+#define __S110	PAGE_SHARED_X
 #define __S111	PAGE_SHARED_X
 
 /* page table for 0-4MB for everybody */
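These __P/__S entries feed the generic VM through protection_map[] in mm/mmap.c, indexed by a VMA's low protection bits in the "xwr" order noted above (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, plus VM_SHARED = 8 to select the __S half). After this change a plain PROT_READ private mapping resolves to the non-executable PAGE_READONLY, while PROT_READ|PROT_EXEC (__P101) keeps PAGE_READONLY_X. For reference, the generic table (unchanged by this patch):

	/* From mm/mmap.c, shown for reference; not modified here. */
	pgprot_t protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};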