Commit 025c5b24 authored by Naoya Horiguchi's avatar Naoya Horiguchi Committed by Linus Torvalds

thp: optimize away unnecessary page table locking

Currently when we check if we can handle thp as it is or we need to split
it into regular sized pages, we hold the page table lock prior to checking
whether a given pmd is mapping thp or not.  Because of this, when it's not
"huge pmd" we suffer from unnecessary lock/unlock overhead.  To remove it,
this patch introduces an optimized check function and replaces several
similar pieces of logic with it.

[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5aaabe83
...@@ -394,20 +394,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, ...@@ -394,20 +394,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte_t *pte; pte_t *pte;
spinlock_t *ptl; spinlock_t *ptl;
spin_lock(&walk->mm->page_table_lock); if (pmd_trans_huge_lock(pmd, vma) == 1) {
if (pmd_trans_huge(*pmd)) { smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
if (pmd_trans_splitting(*pmd)) {
spin_unlock(&walk->mm->page_table_lock);
wait_split_huge_page(vma->anon_vma, pmd);
} else {
smaps_pte_entry(*(pte_t *)pmd, addr,
HPAGE_PMD_SIZE, walk);
spin_unlock(&walk->mm->page_table_lock);
mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
}
} else {
spin_unlock(&walk->mm->page_table_lock); spin_unlock(&walk->mm->page_table_lock);
mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
} }
if (pmd_trans_unstable(pmd)) if (pmd_trans_unstable(pmd))
...@@ -705,26 +696,19 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, ...@@ -705,26 +696,19 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
/* find the first VMA at or above 'addr' */ /* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr); vma = find_vma(walk->mm, addr);
spin_lock(&walk->mm->page_table_lock); spin_lock(&walk->mm->page_table_lock);
if (pmd_trans_huge(*pmd)) { if (pmd_trans_huge_lock(pmd, vma) == 1) {
if (pmd_trans_splitting(*pmd)) { for (; addr != end; addr += PAGE_SIZE) {
spin_unlock(&walk->mm->page_table_lock); unsigned long offset;
wait_split_huge_page(vma->anon_vma, pmd);
} else { offset = (addr & ~PAGEMAP_WALK_MASK) >>
for (; addr != end; addr += PAGE_SIZE) { PAGE_SHIFT;
unsigned long offset; pfn = thp_pmd_to_pagemap_entry(*pmd, offset);
err = add_to_pagemap(addr, pfn, pm);
offset = (addr & ~PAGEMAP_WALK_MASK) >> if (err)
PAGE_SHIFT; break;
pfn = thp_pmd_to_pagemap_entry(*pmd, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
break;
}
spin_unlock(&walk->mm->page_table_lock);
return err;
} }
} else {
spin_unlock(&walk->mm->page_table_lock); spin_unlock(&walk->mm->page_table_lock);
return err;
} }
for (; addr != end; addr += PAGE_SIZE) { for (; addr != end; addr += PAGE_SIZE) {
...@@ -992,24 +976,17 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, ...@@ -992,24 +976,17 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *pte; pte_t *pte;
md = walk->private; md = walk->private;
spin_lock(&walk->mm->page_table_lock);
if (pmd_trans_huge(*pmd)) { if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
if (pmd_trans_splitting(*pmd)) { pte_t huge_pte = *(pte_t *)pmd;
spin_unlock(&walk->mm->page_table_lock); struct page *page;
wait_split_huge_page(md->vma->anon_vma, pmd);
} else { page = can_gather_numa_stats(huge_pte, md->vma, addr);
pte_t huge_pte = *(pte_t *)pmd; if (page)
struct page *page; gather_stats(page, md, pte_dirty(huge_pte),
HPAGE_PMD_SIZE/PAGE_SIZE);
page = can_gather_numa_stats(huge_pte, md->vma, addr);
if (page)
gather_stats(page, md, pte_dirty(huge_pte),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(&walk->mm->page_table_lock);
return 0;
}
} else {
spin_unlock(&walk->mm->page_table_lock); spin_unlock(&walk->mm->page_table_lock);
return 0;
} }
if (pmd_trans_unstable(pmd)) if (pmd_trans_unstable(pmd))
......
...@@ -113,6 +113,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, ...@@ -113,6 +113,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start, unsigned long start,
unsigned long end, unsigned long end,
long adjust_next); long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/*
 * Fast-path wrapper around __pmd_trans_huge_lock(): skip taking
 * page_table_lock entirely when *pmd is clearly not a huge pmd.
 * mmap_sem must be held on entry.
 *
 * Returns 1 with page_table_lock held when 'pmd' maps a stable thp
 * (caller must unlock), -1 after waiting out a thp split, and 0 for
 * a regular pmd (lock not held in the -1/0 cases).
 */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	return pmd_trans_huge(*pmd) ? __pmd_trans_huge_lock(pmd, vma) : 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start, unsigned long start,
unsigned long end, unsigned long end,
...@@ -176,6 +188,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, ...@@ -176,6 +188,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next) long adjust_next)
{ {
} }
/*
 * CONFIG_TRANSPARENT_HUGEPAGE=n stub: no pmd can ever map a thp, so
 * always report "not huge" (0) without taking any lock.  Callers then
 * fall through to their normal pte-walking path.
 */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */ #endif /* _LINUX_HUGE_MM_H */
...@@ -1031,32 +1031,23 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, ...@@ -1031,32 +1031,23 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
{ {
int ret = 0; int ret = 0;
spin_lock(&tlb->mm->page_table_lock); if (__pmd_trans_huge_lock(pmd, vma) == 1) {
if (likely(pmd_trans_huge(*pmd))) { struct page *page;
if (unlikely(pmd_trans_splitting(*pmd))) { pgtable_t pgtable;
spin_unlock(&tlb->mm->page_table_lock); pgtable = get_pmd_huge_pte(tlb->mm);
wait_split_huge_page(vma->anon_vma, page = pmd_page(*pmd);
pmd); pmd_clear(pmd);
} else { tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
struct page *page; page_remove_rmap(page);
pgtable_t pgtable; VM_BUG_ON(page_mapcount(page) < 0);
pgtable = get_pmd_huge_pte(tlb->mm); add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page));
pmd_clear(pmd); tlb->mm->nr_ptes--;
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
page_remove_rmap(page);
VM_BUG_ON(page_mapcount(page) < 0);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
VM_BUG_ON(!PageHead(page));
tlb->mm->nr_ptes--;
spin_unlock(&tlb->mm->page_table_lock);
tlb_remove_page(tlb, page);
pte_free(tlb->mm, pgtable);
ret = 1;
}
} else
spin_unlock(&tlb->mm->page_table_lock); spin_unlock(&tlb->mm->page_table_lock);
tlb_remove_page(tlb, page);
pte_free(tlb->mm, pgtable);
ret = 1;
}
return ret; return ret;
} }
...@@ -1066,21 +1057,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1066,21 +1057,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
{ {
int ret = 0; int ret = 0;
spin_lock(&vma->vm_mm->page_table_lock); if (__pmd_trans_huge_lock(pmd, vma) == 1) {
if (likely(pmd_trans_huge(*pmd))) { /*
ret = !pmd_trans_splitting(*pmd); * All logical pages in the range are present
spin_unlock(&vma->vm_mm->page_table_lock); * if backed by a huge page.
if (unlikely(!ret)) */
wait_split_huge_page(vma->anon_vma, pmd);
else {
/*
* All logical pages in the range are present
* if backed by a huge page.
*/
memset(vec, 1, (end - addr) >> PAGE_SHIFT);
}
} else
spin_unlock(&vma->vm_mm->page_table_lock); spin_unlock(&vma->vm_mm->page_table_lock);
memset(vec, 1, (end - addr) >> PAGE_SHIFT);
ret = 1;
}
return ret; return ret;
} }
...@@ -1110,20 +1095,11 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, ...@@ -1110,20 +1095,11 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
goto out; goto out;
} }
spin_lock(&mm->page_table_lock); ret = __pmd_trans_huge_lock(old_pmd, vma);
if (likely(pmd_trans_huge(*old_pmd))) { if (ret == 1) {
if (pmd_trans_splitting(*old_pmd)) { pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
spin_unlock(&mm->page_table_lock); VM_BUG_ON(!pmd_none(*new_pmd));
wait_split_huge_page(vma->anon_vma, old_pmd); set_pmd_at(mm, new_addr, new_pmd, pmd);
ret = -1;
} else {
pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
VM_BUG_ON(!pmd_none(*new_pmd));
set_pmd_at(mm, new_addr, new_pmd, pmd);
spin_unlock(&mm->page_table_lock);
ret = 1;
}
} else {
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
} }
out: out:
...@@ -1136,24 +1112,41 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1136,24 +1112,41 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
int ret = 0; int ret = 0;
spin_lock(&mm->page_table_lock); if (__pmd_trans_huge_lock(pmd, vma) == 1) {
pmd_t entry;
entry = pmdp_get_and_clear(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
set_pmd_at(mm, addr, pmd, entry);
spin_unlock(&vma->vm_mm->page_table_lock);
ret = 1;
}
return ret;
}
/*
 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
 *
 * Note that if it returns 1, this routine returns without unlocking page
 * table locks. So callers must unlock them.
 */
int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			/*
			 * Thp is being split; drop the lock and wait for
			 * the split to finish, then tell the caller to
			 * retry/fall back (-1).
			 */
			spin_unlock(&vma->vm_mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
			return -1;
		} else {
			/* Thp mapped by 'pmd' is stable, so we can
			 * handle it as it is.  Return with the page
			 * table lock still held. */
			return 1;
		}
	}
	/* Not a huge pmd: release the lock and report 0. */
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}
pmd_t *page_check_address_pmd(struct page *page, pmd_t *page_check_address_pmd(struct page *page,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment