Commit b8d3c4c3 authored by Minchan Kim, committed by Linus Torvalds

mm/huge_memory.c: don't split THP page when MADV_FREE syscall is called

We don't need to split a THP page when the MADV_FREE syscall is called if
[start, len] is aligned with the THP size.  The split can be deferred until
the VM decides to free the page in the reclaim path under memory pressure.
With that, we avoid an unnecessary THP split.
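
For illustration, a minimal user-space sketch (not part of the patch; the
2MB huge page size and the pmd-aligned carve-out are assumptions) of the
case this optimizes: MADV_FREE over a whole, pmd-aligned huge page, which
with this patch no longer forces a split:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumed HPAGE_PMD_SIZE (2MB) */

int main(void)
{
	/* Over-allocate so a pmd-aligned 2MB window can be carved out. */
	size_t len = 2 * HPAGE_SIZE;
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return 1;

	char *hp = (char *)(((uintptr_t)map + HPAGE_SIZE - 1) &
			    ~(HPAGE_SIZE - 1));
	madvise(hp, HPAGE_SIZE, MADV_HUGEPAGE);	/* ask for a THP here */
	memset(hp, 1, HPAGE_SIZE);		/* fault the range in */

	/*
	 * [start, len] covers the whole huge page, so the pmd is only
	 * cleaned and aged; the THP is not split unless reclaim later
	 * decides to free it.
	 */
	madvise(hp, HPAGE_SIZE, MADV_FREE);

	munmap(map, len);
	return 0;
}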

To support this, the patch changes the pte dirtiness marking logic of THP.
Currently, splitting marks every pte of the page dirty unconditionally,
which makes MADV_FREE void.  Instead, this patch propagates pmd dirtiness
to all subpages via PG_dirty and restores pte dirtiness from PG_dirty.
With this, if the pmd is clean (i.e., MADV_FREEed) when the split happens
(e.g., in shrink_page_list), all of the subpages are clean too, so we can
discard them.
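
As a toy illustration only (a user-space model with hypothetical names,
not kernel code), the propagate-then-restore idea amounts to: remember the
pmd's dirty bit once, set a PG_dirty-like flag on each subpage from it,
and rebuild each pte's dirty bit from that flag rather than unconditionally:

#include <stdbool.h>
#include <stdio.h>

#define NR_SUBPAGES 512				/* 2MB THP / 4KB base pages */

static bool page_flag_dirty[NR_SUBPAGES];	/* stands in for PG_dirty      */
static bool pte_flag_dirty[NR_SUBPAGES];	/* stands in for the pte's bit */

/* Model of splitting one huge pmd into NR_SUBPAGES ptes. */
static void split_model(bool pmd_was_dirty)
{
	for (int i = 0; i < NR_SUBPAGES; i++) {
		/* Propagate pmd dirtiness to the subpage flag... */
		if (pmd_was_dirty)
			page_flag_dirty[i] = true;
		/* ...and restore the pte's dirty bit from that flag. */
		pte_flag_dirty[i] = page_flag_dirty[i];
	}
}

int main(void)
{
	split_model(false);	/* pmd clean (MADV_FREEed): ptes stay clean */
	printf("clean pmd -> pte 0 dirty: %d\n", pte_flag_dirty[0]);

	split_model(true);	/* pmd written since mapping: ptes get dirty */
	printf("dirty pmd -> pte 0 dirty: %d\n", pte_flag_dirty[0]);
	return 0;
}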

Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Shaohua Li <shli@kernel.org>
Cc: <yalin.wang2010@gmail.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Gang <gang.chen.5i5j@gmail.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Micay <danielmicay@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jason Evans <je@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mika Penttilä <mika.penttila@nextfour.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rik van Riel <riel@redhat.com>
Cc: Roland Dreier <roland@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Shaohua Li <shli@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 05ee26d9
include/linux/huge_mm.h
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 					  unsigned long addr,
 					  pmd_t *pmd,
 					  unsigned int flags);
+extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
+			struct vm_area_struct *vma,
+			pmd_t *pmd, unsigned long addr, unsigned long next);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
 			pmd_t *pmd, unsigned long addr);
mm/huge_memory.c
@@ -1501,6 +1501,77 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 0;
 }
 
+int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long addr, unsigned long next)
+{
+	spinlock_t *ptl;
+	pmd_t orig_pmd;
+	struct page *page;
+	struct mm_struct *mm = tlb->mm;
+	int ret = 0;
+
+	if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+		goto out;
+
+	orig_pmd = *pmd;
+	if (is_huge_zero_pmd(orig_pmd)) {
+		ret = 1;
+		goto out;
+	}
+
+	page = pmd_page(orig_pmd);
+	/*
+	 * If other processes are mapping this page, we couldn't discard
+	 * the page unless they all do MADV_FREE so let's skip the page.
+	 */
+	if (page_mapcount(page) != 1)
+		goto out;
+
+	if (!trylock_page(page))
+		goto out;
+
+	/*
+	 * If user want to discard part-pages of THP, split it so MADV_FREE
+	 * will deactivate only them.
+	 */
+	if (next - addr != HPAGE_PMD_SIZE) {
+		get_page(page);
+		spin_unlock(ptl);
+		if (split_huge_page(page)) {
+			put_page(page);
+			unlock_page(page);
+			goto out_unlocked;
+		}
+		put_page(page);
+		unlock_page(page);
+		ret = 1;
+		goto out_unlocked;
+	}
+
+	if (PageDirty(page))
+		ClearPageDirty(page);
+	unlock_page(page);
+
+	if (PageActive(page))
+		deactivate_page(page);
+
+	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
+		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
+			tlb->fullmm);
+		orig_pmd = pmd_mkold(orig_pmd);
+		orig_pmd = pmd_mkclean(orig_pmd);
+
+		set_pmd_at(mm, addr, pmd, orig_pmd);
+		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+	}
+	ret = 1;
+out:
+	spin_unlock(ptl);
+out_unlocked:
+	return ret;
+}
+
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
@@ -2710,7 +2781,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *page;
 	pgtable_t pgtable;
 	pmd_t _pmd;
-	bool young, write;
+	bool young, write, dirty;
 	int i;
 
 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2734,6 +2805,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	atomic_add(HPAGE_PMD_NR - 1, &page->_count);
 	write = pmd_write(*pmd);
 	young = pmd_young(*pmd);
+	dirty = pmd_dirty(*pmd);
 
 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 	pmd_populate(mm, &_pmd, pgtable);
@@ -2751,12 +2823,14 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			entry = swp_entry_to_pte(swp_entry);
 		} else {
 			entry = mk_pte(page + i, vma->vm_page_prot);
-			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			entry = maybe_mkwrite(entry, vma);
 			if (!write)
 				entry = pte_wrprotect(entry);
 			if (!young)
 				entry = pte_mkold(entry);
 		}
+		if (dirty)
+			SetPageDirty(page + i);
 		pte = pte_offset_map(&_pmd, haddr);
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, haddr, pte, entry);
@@ -2962,6 +3036,8 @@ static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
 			continue;
 		flush_cache_page(vma, address, page_to_pfn(page));
 		entry = ptep_clear_flush(vma, address, pte + i);
+		if (pte_dirty(entry))
+			SetPageDirty(page);
 		swp_entry = make_migration_entry(page, pte_write(entry));
 		swp_pte = swp_entry_to_pte(swp_entry);
 		if (pte_soft_dirty(entry))
@@ -3028,6 +3104,7 @@ static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
 		page_add_anon_rmap(page, vma, address, false);
 
 		entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
+		if (PageDirty(page))
 		entry = pte_mkdirty(entry);
 		if (is_write_migration_entry(swp_entry))
 			entry = maybe_mkwrite(entry, vma);
@@ -3089,8 +3166,8 @@ static int __split_huge_page_tail(struct page *head, int tail,
 			 (1L << PG_uptodate) |
 			 (1L << PG_active) |
 			 (1L << PG_locked) |
-			 (1L << PG_unevictable)));
-	page_tail->flags |= (1L << PG_dirty);
+			 (1L << PG_unevictable) |
+			 (1L << PG_dirty)));
 
 	/*
 	 * After clearing PageTail the gup refcount can be released.
mm/madvise.c
@@ -271,8 +271,13 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte, *pte, ptent;
 	struct page *page;
 	int nr_swap = 0;
+	unsigned long next;
+
+	next = pmd_addr_end(addr, end);
+	if (pmd_trans_huge(*pmd))
+		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
+			goto next;
 
 	split_huge_pmd(vma, pmd, addr);
 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -383,6 +388,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(orig_pte, ptl);
 	cond_resched();
+next:
 	return 0;
 }