Commit c4088ebd authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: convert the rest to new page table lock api

Only trivial cases left. Let's convert them altogether.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cb900f41
(Part of this commit's diff is collapsed and not shown below.)
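For context, the pmd_lock()/pmd_lockptr() helpers this patch converts callers to were introduced earlier in the same series. The sketch below is a simplified illustration of how they behave once split PMD page table locks are enabled; it is not verbatim kernel code, and the exact lock lookup in the split-lock case is an assumption made for illustration.

/*
 * Simplified sketch (not verbatim kernel code) of the helpers used below.
 * With split PMD locks, the lock lives with the page backing the PMD table;
 * otherwise it falls back to the single mm->page_table_lock.
 */
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
#if USE_SPLIT_PMD_PTLOCKS
	return ptlock_ptr(virt_to_page(pmd));	/* approximate lookup, for illustration */
#else
	return &mm->page_table_lock;		/* legacy single per-mm lock */
#endif
}

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);

	spin_lock(ptl);
	return ptl;	/* callers must pass this pointer to spin_unlock() */
}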
@@ -550,6 +550,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		pmd_t *pmd, unsigned long address)
 {
+	spinlock_t *ptl;
 	pgtable_t new = pte_alloc_one(mm, address);
 	int wait_split_huge_page;
 	if (!new)
@@ -570,7 +571,7 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	wait_split_huge_page = 0;
 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		atomic_long_inc(&mm->nr_ptes);
@@ -578,7 +579,7 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		new = NULL;
 	} else if (unlikely(pmd_trans_splitting(*pmd)))
 		wait_split_huge_page = 1;
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	if (new)
 		pte_free(mm, new);
 	if (wait_split_huge_page)
@@ -1516,20 +1517,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			split_huge_page_pmd(vma, address, pmd);
 			goto split_fallthrough;
 		}
-		spin_lock(&mm->page_table_lock);
+		ptl = pmd_lock(mm, pmd);
 		if (likely(pmd_trans_huge(*pmd))) {
 			if (unlikely(pmd_trans_splitting(*pmd))) {
-				spin_unlock(&mm->page_table_lock);
+				spin_unlock(ptl);
 				wait_split_huge_page(vma->anon_vma, pmd);
 			} else {
 				page = follow_trans_huge_pmd(vma, address,
 							     pmd, flags);
-				spin_unlock(&mm->page_table_lock);
+				spin_unlock(ptl);
 				*page_mask = HPAGE_PMD_NR - 1;
 				goto out;
 			}
 		} else
-			spin_unlock(&mm->page_table_lock);
+			spin_unlock(ptl);
 		/* fall through */
 	}
 split_fallthrough:
...
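All the hunks above follow the same caller-side pattern: take the lock with pmd_lock(), keep the returned spinlock_t *, and release that same pointer on every exit path. A hypothetical, self-contained illustration (the function name example_pmd_is_trans_huge is invented for this sketch and is not part of the patch):

/* Hypothetical illustration only -- not part of this patch. */
static bool example_pmd_is_trans_huge(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	bool huge;

	ptl = pmd_lock(mm, pmd);	/* may be a split per-PMD lock */
	huge = pmd_trans_huge(*pmd);	/* examine the entry under the lock */
	spin_unlock(ptl);		/* unlock via the returned pointer,
					 * not &mm->page_table_lock */

	return huge;
}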
@@ -1667,6 +1667,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page *page, int node)
 {
+	spinlock_t *ptl;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
@@ -1706,9 +1707,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	WARN_ON(PageLRU(new_page));

 	/* Recheck the target PMD */
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, entry))) {
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);

 		/* Reverse changes made by migrate_page_copy() */
 		if (TestClearPageActive(new_page))
@@ -1753,7 +1754,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	 * before it's fully transferred to the new page.
 	 */
 	mem_cgroup_end_migration(memcg, page, new_page, true);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);

 	unlock_page(new_page);
 	unlock_page(page);
...
@@ -151,7 +151,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmdp));

 	/* FIFO */
 	if (!pmd_huge_pte(mm, pmdp))
@@ -170,7 +170,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
 	pgtable_t pgtable;

-	assert_spin_locked(&mm->page_table_lock);
+	assert_spin_locked(pmd_lockptr(mm, pmdp));

 	/* FIFO */
 	pgtable = pmd_huge_pte(mm, pmdp);
...
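The pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw() hunks above only change the assertion: callers must now hold the lock returned by pmd_lockptr() for the PMD they operate on, which is exactly the lock pmd_lock() takes. A hypothetical caller sketch (example_deposit is an invented name, for illustration only):

/* Hypothetical illustration only -- not part of this patch. */
static void example_deposit(struct mm_struct *mm, pmd_t *pmd, pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);	/* lock this PMD's page table lock */

	/* satisfies assert_spin_locked(pmd_lockptr(mm, pmd)) in the helper */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	spin_unlock(ptl);
}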