Commit b9773199 authored by Kirill A. Shutemov's avatar Kirill A. Shutemov Committed by Linus Torvalds

rmap: introduce rmap_walk_locked()

This patchset rewrites freeze_page() and unfreeze_page() using
try_to_unmap() and remove_migration_ptes().  Result is much simpler, but
somewhat slower.

Migration 8GiB worth of PMD-mapped THP:

  Baseline	20.21 +/- 0.393
  Patched	20.73 +/- 0.082
  Slowdown	1.03x

It's 3% slower, compared to 14% in v1.  I don't think it should be a stopper.

Splitting of PTE-mapped pages slowed more.  But this is not a common
case.

Migration 8GiB worth of PTE-mapped THP:

  Baseline	20.39 +/- 0.225
  Patched	22.43 +/- 0.496
  Slowdown	1.10x

rmap_walk_locked() is the same as rmap_walk(), but the caller takes care
of the relevant rmap lock.

This is preparation for switching THP splitting from custom rmap walk in
freeze_page()/unfreeze_page() to the generic one.

There is no support for KSM pages for now: not clear which lock is
implied.
Signed-off-by: default avatarKirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 99490f16
...@@ -266,6 +266,7 @@ struct rmap_walk_control { ...@@ -266,6 +266,7 @@ struct rmap_walk_control {
}; };
int rmap_walk(struct page *page, struct rmap_walk_control *rwc); int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */ #else /* !CONFIG_MMU */
......
...@@ -1715,14 +1715,21 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, ...@@ -1715,14 +1715,21 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be * vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED. * LOCKED.
*/ */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
bool locked)
{ {
struct anon_vma *anon_vma; struct anon_vma *anon_vma;
pgoff_t pgoff; pgoff_t pgoff;
struct anon_vma_chain *avc; struct anon_vma_chain *avc;
int ret = SWAP_AGAIN; int ret = SWAP_AGAIN;
if (locked) {
anon_vma = page_anon_vma(page);
/* anon_vma disappear under us? */
VM_BUG_ON_PAGE(!anon_vma, page);
} else {
anon_vma = rmap_walk_anon_lock(page, rwc); anon_vma = rmap_walk_anon_lock(page, rwc);
}
if (!anon_vma) if (!anon_vma)
return ret; return ret;
...@@ -1742,6 +1749,8 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) ...@@ -1742,6 +1749,8 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
if (rwc->done && rwc->done(page)) if (rwc->done && rwc->done(page))
break; break;
} }
if (!locked)
anon_vma_unlock_read(anon_vma); anon_vma_unlock_read(anon_vma);
return ret; return ret;
} }
...@@ -1759,9 +1768,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) ...@@ -1759,9 +1768,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
* vm_flags for that VMA. That should be OK, because that vma shouldn't be * vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED. * LOCKED.
*/ */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
bool locked)
{ {
struct address_space *mapping = page->mapping; struct address_space *mapping = page_mapping(page);
pgoff_t pgoff; pgoff_t pgoff;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int ret = SWAP_AGAIN; int ret = SWAP_AGAIN;
...@@ -1778,6 +1788,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) ...@@ -1778,6 +1788,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
return ret; return ret;
pgoff = page_to_pgoff(page); pgoff = page_to_pgoff(page);
if (!locked)
i_mmap_lock_read(mapping); i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma); unsigned long address = vma_address(page, vma);
...@@ -1795,6 +1806,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) ...@@ -1795,6 +1806,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
} }
done: done:
if (!locked)
i_mmap_unlock_read(mapping); i_mmap_unlock_read(mapping);
return ret; return ret;
} }
...@@ -1804,9 +1816,20 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc) ...@@ -1804,9 +1816,20 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
if (unlikely(PageKsm(page))) if (unlikely(PageKsm(page)))
return rmap_walk_ksm(page, rwc); return rmap_walk_ksm(page, rwc);
else if (PageAnon(page)) else if (PageAnon(page))
return rmap_walk_anon(page, rwc); return rmap_walk_anon(page, rwc, false);
else
return rmap_walk_file(page, rwc, false);
}
/* Like rmap_walk, but caller holds relevant rmap lock */
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
/* no ksm support for now */
VM_BUG_ON_PAGE(PageKsm(page), page);
if (PageAnon(page))
return rmap_walk_anon(page, rwc, true);
else else
return rmap_walk_file(page, rwc); return rmap_walk_file(page, rwc, true);
} }
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment