Commit 746b18d4 authored by Peter Zijlstra, committed by Linus Torvalds

mm: use refcounts for page_lock_anon_vma()

Convert page_lock_anon_vma() over to use refcounts.  This is done to
prepare for the conversion of anon_vma from spinlock to mutex.

Sadly this increases the cost of page_lock_anon_vma() from one to two
atomics; a follow-up patch addresses this, let's keep it simple for now.
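
The resulting wrapper (from the mm/rmap.c hunk below) makes the two
atomics visible: one for the reference count, one for taking the
anon_vma lock (still a spinlock at this point):

	struct anon_vma *page_lock_anon_vma(struct page *page)
	{
		/* first atomic: atomic_inc_not_zero() on the refcount */
		struct anon_vma *anon_vma = page_get_anon_vma(page);

		if (anon_vma)
			anon_vma_lock(anon_vma);	/* second atomic: the anon_vma lock */

		return anon_vma;
	}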
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6111e4ca
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -721,15 +721,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * Only page_lock_anon_vma() understands the subtleties of
 	 * getting a hold on an anon_vma from outside one of its mms.
 	 */
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_get_anon_vma(page);
 	if (anon_vma) {
 		/*
-		 * Take a reference count on the anon_vma if the
-		 * page is mapped so that it is guaranteed to
-		 * exist when the page is remapped later
+		 * Anon page
 		 */
-		get_anon_vma(anon_vma);
-		page_unlock_anon_vma(anon_vma);
 	} else if (PageSwapCache(page)) {
 		/*
 		 * We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		lock_page(hpage);
 	}
 
-	if (PageAnon(hpage)) {
-		anon_vma = page_lock_anon_vma(hpage);
-		if (anon_vma) {
-			get_anon_vma(anon_vma);
-			page_unlock_anon_vma(anon_vma);
-		}
-	}
+	if (PageAnon(hpage))
+		anon_vma = page_get_anon_vma(hpage);
 
 	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -337,9 +337,9 @@ void __init anon_vma_init(void)
  * that the anon_vma pointer from page->mapping is valid if there is a
  * mapcount, we can dereference the anon_vma after observing those.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_get_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma, *root_anon_vma;
+	struct anon_vma *anon_vma = NULL;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -350,30 +350,42 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	root_anon_vma = ACCESS_ONCE(anon_vma->root);
-	spin_lock(&root_anon_vma->lock);
+	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+		anon_vma = NULL;
+		goto out;
+	}
 
 	/*
 	 * If this page is still mapped, then its anon_vma cannot have been
-	 * freed.  But if it has been unmapped, we have no security against
-	 * the anon_vma structure being freed and reused (for another anon_vma:
-	 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
-	 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
-	 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+	 * freed.  But if it has been unmapped, we have no security against the
+	 * anon_vma structure being freed and reused (for another anon_vma:
+	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+	 * above cannot corrupt).
 	 */
-	if (page_mapped(page))
-		return anon_vma;
-
-	spin_unlock(&root_anon_vma->lock);
+	if (!page_mapped(page)) {
+		put_anon_vma(anon_vma);
+		anon_vma = NULL;
+	}
 out:
 	rcu_read_unlock();
-	return NULL;
+
+	return anon_vma;
+}
+
+struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+	struct anon_vma *anon_vma = page_get_anon_vma(page);
+
+	if (anon_vma)
+		anon_vma_lock(anon_vma);
+
+	return anon_vma;
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	anon_vma_unlock(anon_vma);
-	rcu_read_unlock();
+	put_anon_vma(anon_vma);
 }
 
 /*