Commit 670fe925 authored by Andrew Morton's avatar Andrew Morton Committed by Richard Henderson

[PATCH] Don't reverse the VMA list in touched_by_munmap()

touched_by_munmap() returns a reversed list of VMAs.  That makes things
harder in the low-latency-page-zapping patch.

So change touched_by_munmap() to return a VMA list which is in the original
order - ascending virtual addresses.

Oh, and rename it to <hugh>detach_vmas_to_be_unmapped()</hugh>.  It now
returns nothing, because we know that the VMA we passed in is the head of the
to-be-unmapped list.
parent 0c17b328
...@@ -1031,32 +1031,28 @@ static void unmap_region(struct mm_struct *mm, ...@@ -1031,32 +1031,28 @@ static void unmap_region(struct mm_struct *mm,
} }
/* /*
* Create a list of vma's touched by the unmap, * Create a list of vma's touched by the unmap, removing them from the mm's
* removing them from the VM lists as we go.. * vma list as we go..
* *
* Called with the page_table_lock held. * Called with the page_table_lock held.
*/ */
static struct vm_area_struct *touched_by_munmap(struct mm_struct *mm, static void
struct vm_area_struct *mpnt, detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct vm_area_struct *prev, unsigned long end)
unsigned long end)
{ {
struct vm_area_struct **npp, *touched; struct vm_area_struct **insertion_point;
struct vm_area_struct *tail_vma = NULL;
npp = (prev ? &prev->vm_next : &mm->mmap); insertion_point = (prev ? &prev->vm_next : &mm->mmap);
touched = NULL;
do { do {
struct vm_area_struct *next = mpnt->vm_next; rb_erase(&vma->vm_rb, &mm->mm_rb);
mpnt->vm_next = touched;
touched = mpnt;
rb_erase(&mpnt->vm_rb, &mm->mm_rb);
mm->map_count--; mm->map_count--;
mpnt = next; tail_vma = vma;
} while (mpnt && mpnt->vm_start < end); vma = vma->vm_next;
*npp = mpnt; } while (vma && vma->vm_start < end);
mm->mmap_cache = NULL; /* Kill the cache. */ *insertion_point = vma;
return touched; tail_vma->vm_next = NULL;
mm->mmap_cache = NULL; /* Kill the cache. */
} }
/* /*
...@@ -1152,7 +1148,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ...@@ -1152,7 +1148,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
* Remove the vma's, and unmap the actual pages * Remove the vma's, and unmap the actual pages
*/ */
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
mpnt = touched_by_munmap(mm, mpnt, prev, end); detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
unmap_region(mm, mpnt, prev, start, end); unmap_region(mm, mpnt, prev, start, end);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment