Commit 4897c765 authored by Matthew Wilcox, committed by Linus Torvalds

thp: prepare for DAX huge pages

Add a vma_is_dax() helper function to test whether the VMA is DAX, and use
it in zap_huge_pmd() and __split_huge_page_pmd().

[akpm@linux-foundation.org: fix build]
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c414164
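Before the diff itself, here is a minimal user-space model of the helper this patch adds, useful for seeing exactly what it tests: a VMA is DAX when it is file-backed and the backing inode has direct access enabled. The struct layouts and the S_DAX flag value below are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct inode { unsigned long i_flags; };
struct address_space { struct inode *host; };
struct file { struct address_space *f_mapping; };
struct vm_area_struct { struct file *vm_file; };

#define S_DAX 0x2000	/* illustrative flag value */

static bool IS_DAX(const struct inode *inode)
{
	return inode->i_flags & S_DAX;
}

/* Same shape as the helper this patch adds to include/linux/dax.h:
 * file-backed VMA whose backing inode is marked for direct access. */
static bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

int main(void)
{
	struct inode dax_inode = { .i_flags = S_DAX };
	struct address_space mapping = { .host = &dax_inode };
	struct file f = { .f_mapping = &mapping };
	struct vm_area_struct file_vma = { .vm_file = &f };
	struct vm_area_struct anon_vma = { .vm_file = NULL };

	printf("file-backed DAX vma: %d\n", vma_is_dax(&file_vma));	/* 1 */
	printf("anonymous vma:       %d\n", vma_is_dax(&anon_vma));	/* 0 */
	return 0;
}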
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -18,4 +18,8 @@ int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
 #define dax_mkwrite(vma, vmf, gb, iod)	dax_fault(vma, vmf, gb, iod)
 #define __dax_mkwrite(vma, vmf, gb, iod)	__dax_fault(vma, vmf, gb, iod)
 
+static inline bool vma_is_dax(struct vm_area_struct *vma)
+{
+	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+}
 #endif
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -16,6 +16,7 @@
 #include <linux/swap.h>
 #include <linux/shrinker.h>
 #include <linux/mm_inline.h>
+#include <linux/dax.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
@@ -794,7 +795,7 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 }
 
 /* Caller must hold page table lock. */
-static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
 		struct page *zero_page)
 {
@@ -1421,7 +1422,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	int ret = 0;
 
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		struct page *page;
 		pgtable_t pgtable;
 		pmd_t orig_pmd;
 		/*
@@ -1433,13 +1433,22 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
 							tlb->fullmm);
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
+		if (vma_is_dax(vma)) {
+			if (is_huge_zero_pmd(orig_pmd)) {
+				pgtable = NULL;
+			} else {
+				spin_unlock(ptl);
+				return 1;
+			}
+		} else {
+			pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
+		}
 		if (is_huge_zero_pmd(orig_pmd)) {
 			atomic_long_dec(&tlb->mm->nr_ptes);
 			spin_unlock(ptl);
 			put_huge_zero_page();
 		} else {
-			page = pmd_page(orig_pmd);
+			struct page *page = pmd_page(orig_pmd);
 			page_remove_rmap(page);
 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
@@ -1448,7 +1457,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			spin_unlock(ptl);
 			tlb_remove_page(tlb, page);
 		}
-		pte_free(tlb->mm, pgtable);
+		if (pgtable)
+			pte_free(tlb->mm, pgtable);
 		ret = 1;
 	}
 	return ret;
@@ -2914,7 +2924,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 		pmd_t *pmd)
 {
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	unsigned long mmun_start;	/* For mmu_notifiers */
@@ -2927,25 +2937,25 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 again:
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_trans_huge(*pmd))) {
-		spin_unlock(ptl);
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-		return;
-	}
-	if (is_huge_zero_pmd(*pmd)) {
+	if (unlikely(!pmd_trans_huge(*pmd)))
+		goto unlock;
+	if (vma_is_dax(vma)) {
+		pmdp_huge_clear_flush(vma, haddr, pmd);
+	} else if (is_huge_zero_pmd(*pmd)) {
 		__split_huge_zero_page_pmd(vma, haddr, pmd);
-		spin_unlock(ptl);
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-		return;
+	} else {
+		page = pmd_page(*pmd);
+		VM_BUG_ON_PAGE(!page_count(page), page);
+		get_page(page);
 	}
-	page = pmd_page(*pmd);
-	VM_BUG_ON_PAGE(!page_count(page), page);
-	get_page(page);
+unlock:
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
+	if (!page)
+		return;
+
 	split_huge_page(page);
 	put_page(page);
 
 	/*
...
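The zap_huge_pmd() hunk above hinges on one invariant: anonymous THP deposits a preallocated page table at fault time, which must be withdrawn and freed when the PMD is zapped, while a DAX huge page maps device memory with no struct page and no deposited table. The DAX huge zero page is the in-between case: it still falls through to the nr_ptes and zero-page accounting, but with pgtable = NULL so pte_free() is skipped. A toy user-space model of that decision, with invented enum and function names for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the branch this patch adds to zap_huge_pmd(). */
enum zap_action {
	ZAP_WITHDRAW_AND_FREE,	/* anon THP: pgtable_trans_huge_withdraw() + pte_free() */
	ZAP_NO_DEPOSIT,		/* DAX huge zero page: pgtable = NULL, skip pte_free() */
	ZAP_EARLY_RETURN,	/* DAX device memory: no struct page, nothing more to do */
};

static enum zap_action zap_model(bool vma_is_dax, bool is_huge_zero_pmd)
{
	if (vma_is_dax)
		return is_huge_zero_pmd ? ZAP_NO_DEPOSIT : ZAP_EARLY_RETURN;
	return ZAP_WITHDRAW_AND_FREE;
}

int main(void)
{
	printf("anon THP:      %d\n", zap_model(false, false));	/* ZAP_WITHDRAW_AND_FREE */
	printf("dax zero page: %d\n", zap_model(true, true));	/* ZAP_NO_DEPOSIT */
	printf("dax storage:   %d\n", zap_model(true, false));	/* ZAP_EARLY_RETURN */
	return 0;
}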