Commit 6df888f6 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] sync get_user_pages with 2.4

Forward port of get_user_pages() change from 2.4.

- If the vma is marked as a VM_IO area then fail the map.

  This prevents kernel deadlocks which occur when applications which
  have frame buffers mapped try to dump core.  Also prevents a kernel
  oops when a debugger is attached to a process which has an IO mmap.

- Check that the mapped page is inside mem_map[] (pfn_valid).

- Inline follow_page() and remove the preempt_disable()s.  It has
  only a single callsite and is called under spinlock.
parent 88cb17e4
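
For context, a minimal sketch (not part of this commit) of how a caller of this era's get_user_pages() interface pins and releases user pages, under the usual convention that mm->mmap_sem is held for read across the call. The helper names pin_user_buffer() and unpin_user_buffer() are hypothetical; the point is that, with this patch, asking for the page array over a VM_IO mapping now returns -EFAULT, and pages whose pfn is outside mem_map[] are rejected by get_page_map().

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper (not from this commit): pin nr_pages pages of a
 * user buffer starting at uaddr into pages[].  Returns the number of
 * pages pinned, or a negative errno.  With this patch applied, a request
 * that covers a VM_IO mapping (e.g. a mapped frame buffer) fails with
 * -EFAULT instead of handing back pages that are not in mem_map[].
 */
static int pin_user_buffer(unsigned long uaddr, int nr_pages, int write,
			   struct page **pages)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
			     write, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	return ret;
}

/* Drop the reference that get_user_pages() took on each pinned page. */
static void unpin_user_buffer(struct page **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		page_cache_release(pages[i]);
}
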
@@ -429,9 +429,11 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
 }
 
 /*
  * Do a quick page-table lookup for a single page.
+ * mm->page_table_lock must be held.
  */
-static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
+static inline struct page *
+follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -446,19 +448,14 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
 	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		goto out;
 
-	preempt_disable();
 	ptep = pte_offset_map(pmd, address);
-	if (!ptep) {
-		preempt_enable();
+	if (!ptep)
 		goto out;
-	}
 
 	pte = *ptep;
 	pte_unmap(ptep);
-	preempt_enable();
 	if (pte_present(pte)) {
-		if (!write ||
-		    (pte_write(pte) && pte_dirty(pte))) {
+		if (!write || (pte_write(pte) && pte_dirty(pte))) {
 			pfn = pte_pfn(pte);
 			if (pfn_valid(pfn))
 				return pfn_to_page(pfn);
@@ -475,13 +472,17 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
  * with IO-aperture pages in kiobufs.
  */
-static inline struct page * get_page_map(struct page *page)
+static inline struct page *get_page_map(struct page *page)
 {
+	if (!pfn_valid(page_to_pfn(page)))
+		return 0;
 	return page;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write, int force,
+		struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int flags;
@@ -493,14 +494,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 	i = 0;
 
 	do {
 		struct vm_area_struct * vma;
 
 		vma = find_extend_vma(mm, start);
 
-		if ( !vma || !(flags & vma->vm_flags) )
+		if (!vma || (pages && (vma->vm_flags & VM_IO))
+				|| !(flags & vma->vm_flags))
 			return i ? : -EFAULT;
 
 		spin_lock(&mm->page_table_lock);
@@ -508,7 +509,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 			struct page *map;
 			while (!(map = follow_page(mm, start, write))) {
 				spin_unlock(&mm->page_table_lock);
-				switch (handle_mm_fault(mm, vma, start, write)) {
+				switch (handle_mm_fault(mm,vma,start,write)) {
 				case VM_FAULT_MINOR:
 					tsk->min_flt++;
 					break;
@@ -526,11 +527,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 			}
 			if (pages) {
 				pages[i] = get_page_map(map);
-				/* FIXME: call the correct function,
-				 * depending on the type of the found page
-				 */
-				if (pages[i])
-					page_cache_get(pages[i]);
+				if (!pages[i]) {
+					spin_unlock(&mm->page_table_lock);
+					while (i--)
+						page_cache_release(pages[i]);
+					i = -EFAULT;
+					goto out;
+				}
+				page_cache_get(pages[i]);
 			}
 			if (vmas)
 				vmas[i] = vma;
@@ -540,6 +544,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 		} while(len && start < vma->vm_end);
 		spin_unlock(&mm->page_table_lock);
 	} while(len);
+out:
 	return i;
 }
 