Commit a0f7a756 authored by Naoya Horiguchi, committed by Linus Torvalds

mm/rmap.c: fix pgoff calculation to handle hugepage correctly

I triggered the VM_BUG_ON() in vma_address() when I tried to migrate an
anonymous hugepage with mbind() on kernel v3.16-rc3.  This happens because
the pgoff calculation in rmap_walk_anon() fails to take compound_order()
into account and therefore produces an incorrect value.

This patch introduces page_to_pgoff(), which returns the page's offset in
units of PAGE_CACHE_SIZE.

Kirill pointed out that the page cache tree should natively handle
hugepages, and that to make hugetlbfs fit it, page->index of a hugetlbfs
page should be in units of PAGE_CACHE_SIZE.  That is beyond the scope of
this patch, but page_to_pgoff() confines the point to be fixed to a single
function.
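
For illustration, here is a minimal userspace sketch of the unit mismatch.
It is not kernel code: the struct vma, the constants (x86-64, 4KB base
pages, 2MB hugepages, PAGE_CACHE_SHIFT == PAGE_SHIFT) and the example
values are assumptions made up for the demonstration.  page->index of a
hugetlbfs page is in hugepage-sized units while vma->vm_pgoff is in
base-page units, so skipping the compound_order() shift yields an address
outside the VMA, which is the kind of out-of-range result the VM_BUG_ON()
in vma_address() guards against.

/*
 * Userspace illustration only -- not kernel code.  Assumes x86-64
 * constants: 4KB base pages, 2MB hugepages, PAGE_CACHE_SHIFT == PAGE_SHIFT.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT   12UL       /* 4KB base pages */
#define HPAGE_ORDER   9UL       /* compound_order() of a 2MB hugepage */

/* stand-in for the few vm_area_struct fields the arithmetic needs */
struct vma { unsigned long vm_start, vm_end, vm_pgoff; };

/* mirrors __vma_address(): vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT) */
static unsigned long vma_address(const struct vma *vma, unsigned long pgoff)
{
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
        /* a 4MB hugetlb VMA mapping file offsets [2MB, 6MB); vm_pgoff is
         * kept in base-page units, so 2MB / 4KB = 512 */
        struct vma vma = {
                .vm_start = 0x700000000000UL,
                .vm_end   = 0x700000000000UL + (4UL << 20),
                .vm_pgoff = 512,
        };
        /* page->index of the file's second hugepage, in hugepage units */
        unsigned long index = 1;

        /* old calculation: index << (PAGE_CACHE_SHIFT - PAGE_SHIFT) == index */
        unsigned long old_addr = vma_address(&vma, index);
        /* fixed calculation: scale by compound_order() into base-page units */
        unsigned long new_addr = vma_address(&vma, index << HPAGE_ORDER);

        printf("old: %#lx  new: %#lx  vma: [%#lx, %#lx)\n",
               old_addr, new_addr, vma.vm_start, vma.vm_end);

        /* the old address wraps far outside the VMA; the new one is in range */
        assert(old_addr < vma.vm_start || old_addr >= vma.vm_end);
        assert(new_addr >= vma.vm_start && new_addr < vma.vm_end);
        return 0;
}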
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent aed8adb7
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+	if (unlikely(PageHeadHuge(page)))
+		return page->index << compound_order(page);
+	else
+		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
...
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page_to_pgoff(page);
 	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	mutex_lock(&mapping->i_mmap_mutex);
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		pgoff_t pgoff = page_to_pgoff(page);
 		struct task_struct *t = task_early_kill(tsk, force_early);
 
 		if (!t)
...
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		pgoff = page->index << huge_page_order(page_hstate(page));
-
+	pgoff_t pgoff = page_to_pgoff(page);
 	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 
...