Commit 86c2ad19 authored by Michel Lespinasse, committed by Linus Torvalds

mm rmap: remove vma_address check for address inside vma

In file and anon rmap, we use interval trees to find potentially relevant
vmas and then call vma_address() to find the virtual address at which the
given page might be mapped in each of these vmas.  vma_address() used to
include a check that the returned address falls within the limits of the
vma, but this check is no longer necessary now that rmap always uses
interval trees: the interval tree simply does not return any vmas that this
check would find to be irrelevant.  As a result, we can replace the -EFAULT
error code (which then had to be checked at every call site) with a
VM_BUG_ON().
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bf181b9f
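
The reasoning above can be made concrete with a small user-space sketch. Everything below is hypothetical and simplified (the struct and helpers are stand-ins, not the kernel's vm_area_struct, vma_address(), or interval-tree code): if a lookup only ever returns vmas whose page range contains the page's pgoff, then the computed address always lands inside [vm_start, vm_end), which is exactly the condition the removed -EFAULT path used to test.

/*
 * Minimal user-space sketch (hypothetical names, not kernel code) of why
 * an interval-tree hit can never yield an out-of-range address.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct sketch_vma {                /* simplified stand-in for vm_area_struct */
	unsigned long vm_start;    /* first virtual address mapped */
	unsigned long vm_end;      /* one past the last mapped address */
	unsigned long vm_pgoff;    /* file page offset of vm_start */
};

/*
 * The vma maps file pages [vm_pgoff, vm_pgoff + nr_pages); an interval
 * tree query for pgoff only returns vmas for which this predicate holds.
 */
static int vma_contains_pgoff(const struct sketch_vma *v, unsigned long pgoff)
{
	unsigned long nr_pages = (v->vm_end - v->vm_start) >> PAGE_SHIFT;

	return pgoff >= v->vm_pgoff && pgoff < v->vm_pgoff + nr_pages;
}

/* Same arithmetic as the kernel's (new) __vma_address(). */
static unsigned long sketch_vma_address(const struct sketch_vma *v,
					unsigned long pgoff)
{
	return v->vm_start + ((pgoff - v->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
	struct sketch_vma v = { 0x400000, 0x500000, 16 }; /* 256 pages at pgoff 16 */
	unsigned long pgoff;

	for (pgoff = 0; pgoff < 1024; pgoff++) {
		unsigned long addr;

		if (!vma_contains_pgoff(&v, pgoff))
			continue;  /* the tree would never return this vma */
		addr = sketch_vma_address(&v, pgoff);
		/* The removed -EFAULT check / new VM_BUG_ON() condition: */
		assert(addr >= v.vm_start && addr < v.vm_end);
	}
	printf("every in-tree lookup yields an in-range address\n");
	return 0;
}

Note that page_address_in_vma() and page_mapped_in_vma() are not fed by an interval-tree lookup, which is why the patch below open-codes the range check there (via __vma_address()) instead of going through the asserting vma_address().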
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1386,8 +1386,6 @@ static void __split_huge_page(struct page *page,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long addr = vma_address(page, vma);
 		BUG_ON(is_vma_temporary_stack(vma));
-		if (addr == -EFAULT)
-			continue;
 		mapcount += __split_huge_page_splitting(page, vma, addr);
 	}
 	/*
@@ -1412,8 +1410,6 @@ static void __split_huge_page(struct page *page,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long addr = vma_address(page, vma);
 		BUG_ON(is_vma_temporary_stack(vma));
-		if (addr == -EFAULT)
-			continue;
 		mapcount2 += __split_huge_page_map(page, vma, addr);
 	}
 	if (mapcount != mapcount2)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	unsigned long address;
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		pgoff = page->index << huge_page_order(page_hstate(page));
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		return -EFAULT;
-	}
+
+	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address = __vma_address(page, vma);
+
+	/* page should be within @vma mapping range */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
 	return address;
 }
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	return vma_address(page, vma);
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+		return -EFAULT;
+	return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)		/* out of vma range */
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 		return 0;
 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 	if (!pte)			/* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -904,8 +908,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
-			if (address == -EFAULT)
-				continue;
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
@@ -1468,8 +1470,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
@@ -1508,8 +1508,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
@@ -1684,8 +1682,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
@@ -1707,8 +1703,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
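
Read together, the rmap.c hunks leave three entry points with distinct bounds-handling duties. The comment block below is an editorial summary of that contract, not text from the patch:

/*
 * __vma_address()       - pure arithmetic, no bounds handling; callers that
 *                         can see out-of-range pages (page_address_in_vma(),
 *                         page_mapped_in_vma()) check the range themselves.
 * vma_address()         - for rmap walkers fed by the interval trees, where
 *                         out-of-range is impossible by construction; the
 *                         condition is only asserted with VM_BUG_ON().
 * page_address_in_vma() - still returns -EFAULT to its own callers when the
 *                         page is not mapped within @vma.
 */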