Commit a888f1f5 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: vma_adjust insert file earlier

For those arches (arm and parisc) which use the i_mmap tree to implement
flush_dcache_page, during split_vma there's a small window in vma_adjust when
flush_dcache_mmap_lock is dropped, and pages in the split-off part of the vma
might for an instant be invisible to __flush_dcache_page.
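
As a rough illustration of what that window means (a simplified sketch, not the actual arm/parisc code of this era; sketch_flush_user_page() is a made-up stand-in for the per-arch flush primitive, and the locking/iteration follows the general 2.6-era i_mmap interface), such a flush walks the file's i_mmap prio_tree to find every user mapping of the page, so a vma momentarily missing from the tree is simply skipped:

    #include <linux/mm.h>
    #include <linux/fs.h>
    #include <asm/cacheflush.h>

    /* Hypothetical per-arch helper: flushing the user-space alias of
     * one page would go here.  Stand-in only, not a real kernel API.
     */
    static void sketch_flush_user_page(struct vm_area_struct *vma,
                                       unsigned long addr)
    {
    }

    /*
     * Simplified sketch only: flush every user mapping of a page cache
     * page by walking the file's i_mmap prio_tree.  A vma that is not
     * (yet) linked into the tree is invisible to this walk.
     */
    static void flush_dcache_page_sketch(struct address_space *mapping,
                                         struct page *page)
    {
            struct vm_area_struct *vma;
            struct prio_tree_iter iter;
            pgoff_t pgoff = page->index;

            flush_dcache_mmap_lock(mapping);
            vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                    unsigned long addr = vma->vm_start +
                            ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                    sketch_flush_user_page(vma, addr);
            }
            flush_dcache_mmap_unlock(mapping);
    }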

Though we're more solid there than ever before, I guess it's a bad idea to
leave that window: so (with regret, it was structurally nicer before) take
__vma_link_file (and vma_prio_tree_init) out of __vma_link.

vma_prio_tree_init (which NULLs a few fields) is actually only needed when
copying a vma, not when a new one has just been memset to 0.
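
To make the contrast concrete, a minimal sketch of the two cases (illustrative only; variable names follow the surrounding diff, error handling elided):

    /* Case 1: brand-new vma, zeroed after allocation.  Its prio_tree
     * links are already NULL, so vma_prio_tree_init() would be redundant.
     */
    vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
    if (vma)
            memset(vma, 0, sizeof(*vma));

    /* Case 2: vma created by structure copy, as in split_vma/copy_vma.
     * The copy inherits the source vma's prio_tree linkage, which must
     * be cleared before the new vma is linked into mapping->i_mmap.
     */
    *new = *vma;
    vma_prio_tree_init(new);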

__insert_vm_struct is used by nothing but vma_adjust's split_vma case:
comment it accordingly, remove its mark_mm_hugetlb (it can never create
a new kind of vma) and its validate_mm (another follows immediately).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e495dd35
@@ -293,10 +293,8 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
         struct vm_area_struct *prev, struct rb_node **rb_link,
         struct rb_node *rb_parent)
 {
-        vma_prio_tree_init(vma);
         __vma_link_list(mm, vma, prev, rb_parent);
         __vma_link_rb(mm, vma, rb_link, rb_parent);
-        __vma_link_file(vma);
         __anon_vma_link(vma);
 }
 
@@ -312,7 +310,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
         if (mapping)
                 spin_lock(&mapping->i_mmap_lock);
         anon_vma_lock(vma);
+
         __vma_link(mm, vma, prev, rb_link, rb_parent);
+        __vma_link_file(vma);
+
         anon_vma_unlock(vma);
         if (mapping)
                 spin_unlock(&mapping->i_mmap_lock);
@@ -323,9 +324,9 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Insert vm structure into process list sorted by address and into the
- * inode's i_mmap tree. The caller should hold mm->mmap_sem and
- * ->f_mappping->i_mmap_lock if vm_file is non-NULL.
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
  */
 static void
 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
@@ -337,9 +338,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
         if (__vma && __vma->vm_start < vma->vm_end)
                 BUG();
         __vma_link(mm, vma, prev, rb_link, rb_parent);
-        mark_mm_hugetlb(mm, vma);
         mm->map_count++;
-        validate_mm(mm);
 }
 
 static inline void
@@ -403,6 +402,15 @@ again:          remove_next = 1 + (end > next->vm_end);
                 if (!(vma->vm_flags & VM_NONLINEAR))
                         root = &mapping->i_mmap;
                 spin_lock(&mapping->i_mmap_lock);
+                if (insert) {
+                        /*
+                         * Put into prio_tree now, so instantiated pages
+                         * are visible to arm/parisc __flush_dcache_page
+                         * throughout; but we cannot insert into address
+                         * space until vma start or end is updated.
+                         */
+                        __vma_link_file(insert);
+                }
         }
 
         /*
@@ -1463,6 +1471,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
         /* most fields are the same, copy all, and then fixup */
         *new = *vma;
+        vma_prio_tree_init(new);
 
         if (new_below)
                 new->vm_end = addr;
@@ -1775,6 +1784,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
         new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
         if (new_vma) {
                 *new_vma = *vma;
+                vma_prio_tree_init(new_vma);
                 pol = mpol_copy(vma_policy(vma));
                 if (IS_ERR(pol)) {
                         kmem_cache_free(vm_area_cachep, new_vma);