Commit f1dfe022 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Use names, not numbers for pagefault types

This is Bill Irwin's cleanup patch which gives symbolic names to the
fault types:

	#define VM_FAULT_OOM	(-1)
	#define VM_FAULT_SIGBUS	0
	#define VM_FAULT_MINOR	1
	#define VM_FAULT_MAJOR	2

Only arch/i386 has been updated - other architectures can do this too.
parent 5feb041e
...@@ -56,12 +56,16 @@ int __verify_write(const void * addr, unsigned long size) ...@@ -56,12 +56,16 @@ int __verify_write(const void * addr, unsigned long size)
for (;;) { for (;;) {
survive: survive:
{ switch (handle_mm_fault(current->mm, vma, start, 1)) {
int fault = handle_mm_fault(current->mm, vma, start, 1); case VM_FAULT_SIGBUS:
if (!fault)
goto bad_area; goto bad_area;
if (fault < 0) case VM_FAULT_OOM:
goto out_of_memory; goto out_of_memory;
case VM_FAULT_MINOR:
case VM_FAULT_MAJOR:
break;
default:
BUG();
} }
if (!size) if (!size)
break; break;
...@@ -239,16 +243,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) ...@@ -239,16 +243,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
* the fault. * the fault.
*/ */
switch (handle_mm_fault(mm, vma, address, write)) { switch (handle_mm_fault(mm, vma, address, write)) {
case 1: case VM_FAULT_MINOR:
tsk->min_flt++; tsk->min_flt++;
break; break;
case 2: case VM_FAULT_MAJOR:
tsk->maj_flt++; tsk->maj_flt++;
break; break;
case 0: case VM_FAULT_SIGBUS:
goto do_sigbus; goto do_sigbus;
default: case VM_FAULT_OOM:
goto out_of_memory; goto out_of_memory;
default:
BUG();
} }
/* /*
......
...@@ -305,6 +305,16 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num) ...@@ -305,6 +305,16 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num)
#define NOPAGE_SIGBUS (NULL) #define NOPAGE_SIGBUS (NULL)
#define NOPAGE_OOM ((struct page *) (-1)) #define NOPAGE_OOM ((struct page *) (-1))
/*
* Different kinds of faults, as returned by handle_mm_fault().
* Used to decide whether a process gets delivered SIGBUS or
* just gets major/minor fault counters bumped up.
*/
#define VM_FAULT_OOM (-1)
#define VM_FAULT_SIGBUS 0
#define VM_FAULT_MINOR 1
#define VM_FAULT_MAJOR 2
/* The array of struct pages */ /* The array of struct pages */
extern struct page *mem_map; extern struct page *mem_map;
......
...@@ -503,18 +503,18 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long ...@@ -503,18 +503,18 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
while (!(map = follow_page(mm, start, write))) { while (!(map = follow_page(mm, start, write))) {
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
switch (handle_mm_fault(mm, vma, start, write)) { switch (handle_mm_fault(mm, vma, start, write)) {
case 1: case VM_FAULT_MINOR:
tsk->min_flt++; tsk->min_flt++;
break; break;
case 2: case VM_FAULT_MAJOR:
tsk->maj_flt++; tsk->maj_flt++;
break; break;
case 0: case VM_FAULT_SIGBUS:
if (i) return i; return i ? i : -EFAULT;
return -EFAULT; case VM_FAULT_OOM:
return i ? i : -ENOMEM;
default: default:
if (i) return i; BUG();
return -ENOMEM;
} }
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
} }
...@@ -968,7 +968,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, ...@@ -968,7 +968,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)))); establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
pte_unmap(page_table); pte_unmap(page_table);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 1; /* Minor fault */ return VM_FAULT_MINOR;
} }
} }
pte_unmap(page_table); pte_unmap(page_table);
...@@ -1002,16 +1002,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, ...@@ -1002,16 +1002,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
page_cache_release(new_page); page_cache_release(new_page);
page_cache_release(old_page); page_cache_release(old_page);
return 1; /* Minor fault */ return VM_FAULT_MINOR;
bad_wp_page: bad_wp_page:
pte_unmap(page_table); pte_unmap(page_table);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n", address); printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n", address);
return -1; /*
* This should really halt the system so it can be debugged or
* at least the kernel stops what it's doing before it corrupts
* data, but for the moment just pretend this is OOM.
*/
return VM_FAULT_OOM;
no_mem: no_mem:
page_cache_release(old_page); page_cache_release(old_page);
return -1; return VM_FAULT_OOM;
} }
static void vmtruncate_list(list_t *head, unsigned long pgoff) static void vmtruncate_list(list_t *head, unsigned long pgoff)
...@@ -1135,7 +1140,7 @@ static int do_swap_page(struct mm_struct * mm, ...@@ -1135,7 +1140,7 @@ static int do_swap_page(struct mm_struct * mm,
struct page *page; struct page *page;
swp_entry_t entry = pte_to_swp_entry(orig_pte); swp_entry_t entry = pte_to_swp_entry(orig_pte);
pte_t pte; pte_t pte;
int ret = 1; int ret = VM_FAULT_MINOR;
pte_unmap(page_table); pte_unmap(page_table);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
...@@ -1148,17 +1153,19 @@ static int do_swap_page(struct mm_struct * mm, ...@@ -1148,17 +1153,19 @@ static int do_swap_page(struct mm_struct * mm,
* Back out if somebody else faulted in this pte while * Back out if somebody else faulted in this pte while
* we released the page table lock. * we released the page table lock.
*/ */
int retval;
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
page_table = pte_offset_map(pmd, address); page_table = pte_offset_map(pmd, address);
retval = pte_same(*page_table, orig_pte) ? -1 : 1; if (pte_same(*page_table, orig_pte))
ret = VM_FAULT_OOM;
else
ret = VM_FAULT_MINOR;
pte_unmap(page_table); pte_unmap(page_table);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return retval; return ret;
} }
/* Had to read the page from swap area: Major fault */ /* Had to read the page from swap area: Major fault */
ret = 2; ret = VM_FAULT_MAJOR;
} }
lock_page(page); lock_page(page);
...@@ -1174,7 +1181,7 @@ static int do_swap_page(struct mm_struct * mm, ...@@ -1174,7 +1181,7 @@ static int do_swap_page(struct mm_struct * mm,
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
unlock_page(page); unlock_page(page);
page_cache_release(page); page_cache_release(page);
return 1; return VM_FAULT_MINOR;
} }
/* The page isn't present yet, go ahead with the fault. */ /* The page isn't present yet, go ahead with the fault. */
...@@ -1232,7 +1239,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1232,7 +1239,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
pte_unmap(page_table); pte_unmap(page_table);
page_cache_release(page); page_cache_release(page);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 1; return VM_FAULT_MINOR;
} }
mm->rss++; mm->rss++;
flush_page_to_ram(page); flush_page_to_ram(page);
...@@ -1246,10 +1253,10 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1246,10 +1253,10 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
/* No need to invalidate - it was non-present before */ /* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, entry); update_mmu_cache(vma, addr, entry);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 1; /* Minor fault */ return VM_FAULT_MINOR;
no_mem: no_mem:
return -1; return VM_FAULT_OOM;
} }
/* /*
...@@ -1277,10 +1284,11 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1277,10 +1284,11 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0); new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
if (new_page == NULL) /* no page was available -- SIGBUS */ /* no page was available -- either SIGBUS or OOM */
return 0; if (new_page == NOPAGE_SIGBUS)
return VM_FAULT_SIGBUS;
if (new_page == NOPAGE_OOM) if (new_page == NOPAGE_OOM)
return -1; return VM_FAULT_OOM;
/* /*
* Should we do an early C-O-W break? * Should we do an early C-O-W break?
...@@ -1289,7 +1297,7 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1289,7 +1297,7 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
struct page * page = alloc_page(GFP_HIGHUSER); struct page * page = alloc_page(GFP_HIGHUSER);
if (!page) { if (!page) {
page_cache_release(new_page); page_cache_release(new_page);
return -1; return VM_FAULT_OOM;
} }
copy_user_highpage(page, new_page, address); copy_user_highpage(page, new_page, address);
page_cache_release(new_page); page_cache_release(new_page);
...@@ -1325,13 +1333,13 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1325,13 +1333,13 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
pte_unmap(page_table); pte_unmap(page_table);
page_cache_release(new_page); page_cache_release(new_page);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 1; return VM_FAULT_MINOR;
} }
/* no need to invalidate: a not-present page shouldn't be cached */ /* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry); update_mmu_cache(vma, address, entry);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 2; /* Major fault */ return VM_FAULT_MAJOR;
} }
/* /*
...@@ -1383,7 +1391,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, ...@@ -1383,7 +1391,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
establish_pte(vma, address, pte, entry); establish_pte(vma, address, pte, entry);
pte_unmap(pte); pte_unmap(pte);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return 1; return VM_FAULT_MINOR;
} }
/* /*
...@@ -1411,7 +1419,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, ...@@ -1411,7 +1419,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
return handle_pte_fault(mm, vma, address, write_access, pte, pmd); return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
} }
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
return -1; return VM_FAULT_OOM;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment