Commit a4d1a885 authored by Jérôme Glisse, committed by Linus Torvalds

dax: update to new mmu_notifier semantic

Replace all mmu_notifier_invalidate_page() calls with *_invalidate_range()
and make sure they are bracketed by calls to *_invalidate_range_start()/end().

Note that because we cannot presume the pmd or pte value, we have to
assume the worst and unconditionally report an invalidation as
happening.
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Bernhard Held <berny156@gmx.de>
Cc: Adam Borowski <kilobyte@angband.pl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: axie <axie@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 42ff72cf
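
For readers skimming the diff, here is a minimal sketch of the calling convention this commit moves to. It is illustrative only, not code from the commit: the helper name clean_one_pte() is invented and the pte update itself is elided; only set_pte_at(), spin_lock()/spin_unlock() and the mmu_notifier_* calls are the real kernel API.

static void clean_one_pte(struct mm_struct *mm, unsigned long address,
			  pte_t *ptep, spinlock_t *ptl)
{
	unsigned long start = address & PAGE_MASK;
	unsigned long end = start + PAGE_SIZE;

	/*
	 * Old semantic (removed by this commit): update the pte, then
	 * call mmu_notifier_invalidate_page(mm, address), unbracketed.
	 *
	 * New semantic: open the bracket before taking the page-table
	 * lock, report the range invalidation while the lock is held,
	 * and close the bracket after dropping it.
	 */
	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(ptl);
	/* ... update *ptep (write-protect, clean), flush the TLB ... */
	mmu_notifier_invalidate_range(mm, start, end);
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, start, end);
}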
fs/dax.c
@@ -646,11 +646,10 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 	pte_t pte, *ptep = NULL;
 	pmd_t *pmdp = NULL;
 	spinlock_t *ptl;
-	bool changed;
 
 	i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-		unsigned long address;
+		unsigned long address, start, end;
 
 		cond_resched();
 
@@ -658,8 +657,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 			continue;
 
 		address = pgoff_address(index, vma);
-		changed = false;
-		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
+
+		/*
+		 * Note because we provide start/end to follow_pte_pmd it will
+		 * call mmu_notifier_invalidate_range_start() on our behalf
+		 * before taking any lock.
+		 */
+		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
 			continue;
 
 		if (pmdp) {
@@ -676,7 +680,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 			pmd = pmd_wrprotect(pmd);
 			pmd = pmd_mkclean(pmd);
 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-			changed = true;
+			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pmd:
 			spin_unlock(ptl);
 #endif
@@ -691,13 +695,12 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 			pte = pte_wrprotect(pte);
 			pte = pte_mkclean(pte);
 			set_pte_at(vma->vm_mm, address, ptep, pte);
-			changed = true;
+			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pte:
 			pte_unmap_unlock(ptep, ptl);
 		}
 
-		if (changed)
-			mmu_notifier_invalidate_page(vma->vm_mm, address);
+		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 	}
 	i_mmap_unlock_read(mapping);
 }
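
The contract the new comment in the hunk above relies on, spelled out as a caller sketch (illustrative, not from this commit; touch_mapping_entry() is an invented name): when follow_pte_pmd() is given non-NULL start/end and succeeds, mmu_notifier_invalidate_range_start() has already been called, so the caller must always issue the matching *_invalidate_range_end(), whether or not it modified anything.

static void touch_mapping_entry(struct vm_area_struct *vma, unsigned long address)
{
	unsigned long start, end;
	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
		return;	/* failure: no range_start() was left open */

	if (pmdp) {
		/* huge entry: inspect or update *pmdp under the pmd lock */
		spin_unlock(ptl);
	} else {
		/* regular entry: inspect or update *ptep under the pte lock */
		pte_unmap_unlock(ptep, ptl);
	}
	/* always close the bracket opened by follow_pte_pmd() */
	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
}

follow_pte(), by contrast, passes NULL for both range pointers, so __follow_pte_pmd() skips the notifier calls entirely (see the mm/memory.c hunks below).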
include/linux/mm.h
@@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+			unsigned long *start, unsigned long *end,
 			pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
mm/memory.c
@@ -4008,7 +4008,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-		pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+		unsigned long *start, unsigned long *end,
+		pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -4035,17 +4036,29 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		if (!pmdpp)
 			goto out;
 
+		if (start && end) {
+			*start = address & PMD_MASK;
+			*end = *start + PMD_SIZE;
+			mmu_notifier_invalidate_range_start(mm, *start, *end);
+		}
 		*ptlp = pmd_lock(mm, pmd);
 		if (pmd_huge(*pmd)) {
 			*pmdpp = pmd;
 			return 0;
 		}
 		spin_unlock(*ptlp);
+		if (start && end)
+			mmu_notifier_invalidate_range_end(mm, *start, *end);
 	}
 
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
 		goto out;
 
+	if (start && end) {
+		*start = address & PAGE_MASK;
+		*end = *start + PAGE_SIZE;
+		mmu_notifier_invalidate_range_start(mm, *start, *end);
+	}
 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
 	if (!pte_present(*ptep))
 		goto unlock;
@@ -4053,6 +4066,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 	return 0;
 unlock:
 	pte_unmap_unlock(ptep, *ptlp);
+	if (start && end)
+		mmu_notifier_invalidate_range_end(mm, *start, *end);
 out:
 	return -EINVAL;
 }
@@ -4064,20 +4079,21 @@ static inline int follow_pte(struct mm_struct *mm, unsigned long address,
 	/* (void) is needed to make gcc happy */
 	(void) __cond_lock(*ptlp,
-			   !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
-					   ptlp)));
+			   !(res = __follow_pte_pmd(mm, address, NULL, NULL,
+						    ptepp, NULL, ptlp)));
 	return res;
 }
 
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+		   unsigned long *start, unsigned long *end,
 		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
 	int res;
 
 	/* (void) is needed to make gcc happy */
 	(void) __cond_lock(*ptlp,
-			   !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
-					   ptlp)));
+			   !(res = __follow_pte_pmd(mm, address, start, end,
+						    ptepp, pmdpp, ptlp)));
 	return res;
 }
 EXPORT_SYMBOL(follow_pte_pmd);
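
As a worked example of the start/end computation in __follow_pte_pmd() above (illustrative values; assumes x86-64 defaults of PAGE_SIZE = 4 KiB and PMD_SIZE = 2 MiB):

/* The address value is arbitrary; only the masking is the point. */
unsigned long address = 0x7f12345678abUL;

/* pte path: invalidate just the 4 KiB page containing address */
unsigned long pte_start = address & PAGE_MASK;	/* 0x7f1234567000 */
unsigned long pte_end   = pte_start + PAGE_SIZE;	/* 0x7f1234568000 */

/* pmd path: invalidate the whole 2 MiB huge-page range */
unsigned long pmd_start = address & PMD_MASK;	/* 0x7f1234400000 */
unsigned long pmd_end   = pmd_start + PMD_SIZE;	/* 0x7f1234600000 */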