Commit 2a52bcbc authored by Kirill A. Shutemov, committed by Linus Torvalds

rmap: extend try_to_unmap() to be usable by split_huge_page()

Add support for two ttu_flags:

  - TTU_SPLIT_HUGE_PMD splits the PMD, if one is present, before trying to
    unmap the page;

  - TTU_RMAP_LOCKED indicates that the caller holds the relevant rmap lock.

Also, change rwc->done to check !page_mapcount() instead of !page_mapped():
try_to_unmap() works at the PTE level, so we are really interested in whether
this small page is mapped, not whether the compound page it belongs to is.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b9773199
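
For illustration, a minimal hypothetical sketch of how a caller that serializes against rmap itself (as the split_huge_page() path does) could combine the two new flags. The unmap_thp_for_split() name, the flag selection, and the locking shown here are assumptions for the example, not code from this patch:

/* Illustrative only: drive try_to_unmap() with the new flags while
 * holding the anon_vma lock ourselves. */
static void unmap_thp_for_split(struct anon_vma *anon_vma, struct page *head)
{
        /* We take the rmap lock ourselves, so try_to_unmap() must not. */
        enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
                                   TTU_RMAP_LOCKED;
        int i;

        anon_vma_lock_write(anon_vma);

        /* Split the PMD once; afterwards the THP is mapped with PTEs. */
        try_to_unmap(head, ttu_flags | TTU_SPLIT_HUGE_PMD);

        /* Unmap the remaining small pages (return values ignored here). */
        for (i = 1; i < HPAGE_PMD_NR; i++)
                try_to_unmap(head + i, ttu_flags);

        anon_vma_unlock_write(anon_vma);
}

Because rwc->done now tests page_mapcount() of the page actually passed in, each walk above can finish as soon as that small page is unmapped, instead of waiting for the whole compound page to become unmapped.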
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -106,6 +106,9 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         __split_huge_pmd(__vma, __pmd, __address);      \
         } while (0)
+
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address);
+
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -173,6 +176,10 @@ static inline int split_huge_page(struct page *page)
 static inline void deferred_split_huge_page(struct page *page) {}
 #define split_huge_pmd(__vma, __pmd, __address) \
         do { } while (0)
+
+static inline void split_huge_pmd_address(struct vm_area_struct *vma,
+                unsigned long address) {}
+
 static inline int hugepage_madvise(struct vm_area_struct *vma,
                                    unsigned long *vm_flags, int advice)
 {
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -86,6 +86,7 @@ enum ttu_flags {
         TTU_MIGRATION = 2,              /* migration mode */
         TTU_MUNLOCK = 4,                /* munlock mode */
         TTU_LZFREE = 8,                 /* lazy free mode */
+        TTU_SPLIT_HUGE_PMD = 16,        /* split huge PMD if any */
 
         TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
         TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
@@ -93,6 +94,8 @@ enum ttu_flags {
         TTU_BATCH_FLUSH = (1 << 11),    /* Batch TLB flushes where possible
                                          * and caller guarantees they will
                                          * do a final flush if necessary */
+        TTU_RMAP_LOCKED = (1 << 12)     /* do not grab rmap lock:
+                                         * caller holds it */
 };
 
 #ifdef CONFIG_MMU
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3006,15 +3006,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         }
 }
 
-static void split_huge_pmd_address(struct vm_area_struct *vma,
-                                   unsigned long address)
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address)
 {
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
 
-        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
-
         pgd = pgd_offset(vma->vm_mm, address);
         if (!pgd_present(*pgd))
                 return;
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1431,6 +1431,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                 goto out;
 
+        if (flags & TTU_SPLIT_HUGE_PMD)
+                split_huge_pmd_address(vma, address);
         pte = page_check_address(page, mm, address, &ptl, 0);
         if (!pte)
                 goto out;
@@ -1576,10 +1578,10 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
         return is_vma_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_mapcount_is_zero(struct page *page)
 {
-        return !page_mapped(page);
-};
+        return !page_mapcount(page);
+}
 
 /**
  * try_to_unmap - try to remove all page table mappings to a page
@@ -1606,12 +1608,10 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
         struct rmap_walk_control rwc = {
                 .rmap_one = try_to_unmap_one,
                 .arg = &rp,
-                .done = page_not_mapped,
+                .done = page_mapcount_is_zero,
                 .anon_lock = page_lock_anon_vma_read,
         };
 
-        VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
-
         /*
          * During exec, a temporary VMA is setup and later moved.
          * The VMA is moved under the anon_vma lock but not the
@@ -1623,9 +1623,12 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
         if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
                 rwc.invalid_vma = invalid_migration_vma;
 
-        ret = rmap_walk(page, &rwc);
+        if (flags & TTU_RMAP_LOCKED)
+                ret = rmap_walk_locked(page, &rwc);
+        else
+                ret = rmap_walk(page, &rwc);
 
-        if (ret != SWAP_MLOCK && !page_mapped(page)) {
+        if (ret != SWAP_MLOCK && !page_mapcount(page)) {
                 ret = SWAP_SUCCESS;
                 if (rp.lazyfreed && !PageDirty(page))
                         ret = SWAP_LZFREE;
@@ -1633,6 +1636,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
         return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+        return !page_mapped(page);
+};
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked