Commit 918e556e authored by David Howells, committed by Linus Torvalds

NOMMU: Lock i_mmap_mutex for access to the VMA prio list

Lock i_mmap_mutex around accesses to the VMA prio list to prevent
concurrent modification.  Currently, certain parts of the mmap handling
are protected by the region lock (nommu_region_sem), but not all of
them.
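
For readers unfamiliar with the pattern: the fix is simply to bracket
every prio-tree update with the mapping's i_mmap_mutex.  A minimal
sketch of the insert path (the remove path in delete_vma_from_mm() is
symmetric; see the hunks below for the actual change):

	struct address_space *mapping = vma->vm_file->f_mapping;

	/* all readers and writers of mapping->i_mmap must hold
	 * i_mmap_mutex across the prio-tree operation */
	mutex_lock(&mapping->i_mmap_mutex);
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
	mutex_unlock(&mapping->i_mmap_mutex);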
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 37e79cbf
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* remove from the MM's tree and list */
@@ -2052,6 +2056,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
+	mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2064,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
+			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2086,6 +2092,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
+	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 	up_write(&nommu_region_sem);
 	return 0;
 }
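
One point worth noting in the nommu_shrink_inode_mappings() hunks: the
new mutex nests inside nommu_region_sem, and the early -ETXTBSY return
unlocks in the reverse order of acquisition.  Condensed, the resulting
lock ordering is:

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* ... walk the prio tree; any exit path unlocks in reverse ... */

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);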