Commit 2b575eb6 authored by Peter Zijlstra, committed by Linus Torvalds

mm: convert anon_vma->lock to a mutex

Straightforward conversion of anon_vma->lock to a mutex.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 746b18d4
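
Before the diff itself, a minimal orientation sketch of the conversion pattern. The struct and helper names below are illustrative stand-ins, not the patch: only the field names `root` and `mutex` and the lock-through-the-root discipline come from the hunks that follow.

```c
#include <linux/mutex.h>
#include <linux/list.h>

/* Illustrative stand-in for struct anon_vma; other fields elided. */
struct anon_vma_sketch {
	struct anon_vma_sketch *root;	/* root of the anon_vma tree */
	struct mutex mutex;		/* was: spinlock_t lock */
	struct list_head head;		/* list of anon_vma_chains */
};

/* All locking goes through the root, exactly as before; only the
 * primitive changes, so lock holders may now sleep. */
static inline void sketch_lock(struct anon_vma_sketch *anon_vma)
{
	mutex_lock(&anon_vma->root->mutex);	/* was: spin_lock() */
}

static inline void sketch_unlock(struct anon_vma_sketch *anon_vma)
{
	mutex_unlock(&anon_vma->root->mutex);	/* was: spin_unlock() */
}
```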
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -92,12 +92,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #define wait_split_huge_page(__anon_vma, __pmd)			\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
-		spin_unlock_wait(&(__anon_vma)->root->lock);		\
-		/*							\
-		 * spin_unlock_wait() is just a loop in C and so the	\
-		 * CPU can reorder anything around it.			\
-		 */							\
-		smp_mb();						\
+		anon_vma_lock(__anon_vma);				\
+		anon_vma_unlock(__anon_vma);				\
 		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
 		       pmd_trans_huge(*____pmd));			\
 	} while (0)
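
The hunk above is the one place the conversion is not one-to-one: a mutex has no spin_unlock_wait() equivalent, so "wait until the current holder is gone" is expressed by taking and immediately releasing the mutex. Acquiring the mutex also orders us after the previous critical section, which is what the deleted smp_mb() was for. A minimal sketch of the idiom, with a hypothetical helper name:

```c
#include <linux/mutex.h>

/*
 * Hypothetical helper: wait until any current holder of @m has
 * released it. The acquisition cannot succeed until the previous
 * critical section is complete, and acquire/release semantics
 * supply the ordering that spin_unlock_wait() + smp_mb() used to.
 */
static inline void mutex_wait_for_holder(struct mutex *m)
{
	mutex_lock(m);
	mutex_unlock(m);
}
```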
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -150,7 +150,7 @@ struct mmu_notifier_ops {
 	 * Therefore notifier chains can only be traversed when either
 	 *
 	 * 1. mmap_sem is held.
-	 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->lock).
+	 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
 	 * 3. No other concurrent thread can access the list (release)
 	 */
 struct mmu_notifier {
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -26,7 +26,7 @@
  */
 struct anon_vma {
 	struct anon_vma *root;	/* Root of this anon_vma tree */
-	spinlock_t lock;	/* Serialize access to vma list */
+	struct mutex mutex;	/* Serialize access to vma list */
 	/*
 	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
 	struct vm_area_struct *vma;
 	struct anon_vma *anon_vma;
 	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
-	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
+	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
 };
 
 #ifdef CONFIG_MMU
@@ -93,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	if (anon_vma)
-		spin_lock(&anon_vma->root->lock);
+		mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	if (anon_vma)
-		spin_unlock(&anon_vma->root->lock);
+		mutex_unlock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	spin_lock(&anon_vma->root->lock);
+	mutex_lock(&anon_vma->root->mutex);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->root->lock);
+	mutex_unlock(&anon_vma->root->mutex);
 }
 
 /*
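
Since all four helpers above funnel through anon_vma->root->mutex, a single mutex serializes an entire anon_vma tree, and holders can now block. A hedged usage sketch — the walker below is hypothetical; the list and helper names come from the rmap.h hunks above:

```c
#include <linux/rmap.h>

/* Hypothetical walker over the same_anon_vma list, which the struct
 * comment above documents as protected by anon_vma->mutex. */
static void walk_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma_chain *avc;

	anon_vma_lock(anon_vma);	/* takes anon_vma->root->mutex */
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		/* ... per-vma work; sleeping is now legal here ... */
	}
	anon_vma_unlock(anon_vma);
}
```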
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1139,7 +1139,7 @@ static int __split_huge_page_splitting(struct page *page,
 		 * We can't temporarily set the pmd to null in order
 		 * to split it, the pmd must remain marked huge at all
 		 * times or the VM won't take the pmd_trans_huge paths
-		 * and it won't wait on the anon_vma->root->lock to
+		 * and it won't wait on the anon_vma->root->mutex to
 		 * serialize against split_huge_page*.
 		 */
 		pmdp_splitting_flush_notify(vma, address, pmd);
@@ -1333,7 +1333,7 @@ static int __split_huge_page_map(struct page *page,
 	return ret;
 }
 
-/* must be called with anon_vma->root->lock hold */
+/* must be called with anon_vma->root->mutex hold */
 static void __split_huge_page(struct page *page,
 			      struct anon_vma *anon_vma)
 {
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2502,15 +2502,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
+		mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
-		 * anon_vma->root->lock. If some other vma in this mm shares
+		 * anon_vma->root->mutex. If some other vma in this mm shares
 		 * the same anon_vma we won't take it again.
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us thanks to the
-		 * anon_vma->root->lock.
+		 * anon_vma->root->mutex.
 		 */
 		if (__test_and_set_bit(0, (unsigned long *)
 				       &anon_vma->root->head.next))
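
mm_take_all_locks() acquires one lock of the same lock class for every anon_vma in the mm, which lockdep would ordinarily flag as recursive locking. The spin_lock_nest_lock() annotation is therefore carried over as mutex_lock_nest_lock(), telling lockdep that every such acquisition nests under mmap_sem. A small sketch of what the annotation expresses, using a hypothetical helper:

```c
#include <linux/mutex.h>
#include <linux/rwsem.h>

/*
 * Hypothetical helper: take one per-object mutex while telling
 * lockdep that all mutexes of this class are only ever taken in
 * bulk under @outer (here mmap_sem, held for writing), so holding
 * many of them at once is serialized by @outer, not a deadlock.
 */
static void lock_one_nested(struct mutex *m, struct rw_semaphore *outer)
{
	mutex_lock_nest_lock(m, outer);
}
```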
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2615,7 +2615,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us until we release the
-		 * anon_vma->root->lock.
+		 * anon_vma->root->mutex.
 		 */
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->root->head.next))
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,7 +25,7 @@
  * mm->mmap_sem
  *   page->flags PG_locked (lock_page)
  *     mapping->i_mmap_mutex
- *       anon_vma->lock
+ *       anon_vma->mutex
  *         mm->page_table_lock or pte_lock
  *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  *             swap_lock (in swap_duplicate, swap_info_get)
@@ -40,7 +40,7 @@
  *
  * (code doesn't rely on that order so it could be switched around)
  * ->tasklist_lock
- *   anon_vma->lock      (memory_failure, collect_procs_anon)
+ *   anon_vma->mutex      (memory_failure, collect_procs_anon)
  *     pte map lock
  */
@@ -307,7 +307,7 @@ static void anon_vma_ctor(void *data)
 {
 	struct anon_vma *anon_vma = data;
 
-	spin_lock_init(&anon_vma->lock);
+	mutex_init(&anon_vma->mutex);
 	atomic_set(&anon_vma->refcount, 0);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
@@ -1143,7 +1143,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		/*
 		 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
 		 * unstable result and race. Plus, We can't wait here because
-		 * we now hold anon_vma->lock or mapping->i_mmap_mutex.
+		 * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
 		 * if trylock failed, the page remain in evictable lru and later
 		 * vmscan could retry to move the page to unevictable lru if the
 		 * page is actually mlocked.
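
The comment above explains why try_to_unmap_one() uses a trylock on mmap_sem rather than blocking: the caller already holds anon_vma->mutex or i_mmap_mutex, which sit below mmap_sem in the documented order, so sleeping on mmap_sem here would invert that order. A hedged sketch of the pattern with a hypothetical helper:

```c
#include <linux/rwsem.h>
#include <linux/types.h>

/*
 * Hypothetical helper: opportunistically check something under
 * mmap_sem. Blocking is forbidden because the caller already holds
 * a lock that nests *under* mmap_sem in the documented order; on
 * contention we simply report failure and let a later pass retry.
 */
static bool check_under_mmap_sem(struct rw_semaphore *mmap_sem)
{
	if (!down_read_trylock(mmap_sem))
		return false;		/* contended: retry later */
	/* ... e.g. the VM_LOCKED check would go here ... */
	up_read(mmap_sem);
	return true;
}
```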