Commit 36eaff33 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm, ksm: convert write_protect_page() to use page_vma_mapped_walk()

For consistency, it is worth converting all page_check_address() callers to
page_vma_mapped_walk(), so that we can drop the former.
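
For reference, the two calling conventions look like this. This is a minimal
sketch in kernel C using the identifiers from the diff below; the surrounding
declarations and the "out" label are placeholders, not a complete function:

	/* Old pattern: page_check_address() returns the mapped PTE with its
	 * page-table lock held; the caller must drop both when done.
	 */
	pte_t *ptep;
	spinlock_t *ptl;

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;
	/* ... inspect and modify *ptep at addr ... */
	pte_unmap_unlock(ptep, ptl);

	/* New pattern: all walk state lives in one struct, and the walker
	 * takes the PTE lock itself; page_vma_mapped_walk_done() drops it.
	 */
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	if (!page_vma_mapped_walk(&pvmw))
		goto out;
	/* ... inspect and modify *pvmw.pte at pvmw.address ... */
	page_vma_mapped_walk_done(&pvmw);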

Link: http://lkml.kernel.org/r/20170129173858.45174-9-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c7ab0d2f
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -856,33 +856,35 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 			      pte_t *orig_pte)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr;
-	pte_t *ptep;
-	spinlock_t *ptl;
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+	};
 	int swapped;
 	int err = -EFAULT;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 
-	addr = page_address_in_vma(page, vma);
-	if (addr == -EFAULT)
+	pvmw.address = page_address_in_vma(page, vma);
+	if (pvmw.address == -EFAULT)
 		goto out;
 
 	BUG_ON(PageTransCompound(page));
 
-	mmun_start = addr;
-	mmun_end = addr + PAGE_SIZE;
+	mmun_start = pvmw.address;
+	mmun_end = pvmw.address + PAGE_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
-	ptep = page_check_address(page, mm, addr, &ptl, 0);
-	if (!ptep)
+	if (!page_vma_mapped_walk(&pvmw))
 		goto out_mn;
+	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
+		goto out_unlock;
 
-	if (pte_write(*ptep) || pte_dirty(*ptep)) {
+	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte)) {
 		pte_t entry;
 
 		swapped = PageSwapCache(page);
-		flush_cache_page(vma, addr, page_to_pfn(page));
+		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
 		/*
 		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
 		 * take any lock, therefore the check that we are going to make
@@ -892,25 +894,25 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		 * this assure us that no O_DIRECT can happen after the check
 		 * or in the middle of the check.
 		 */
-		entry = ptep_clear_flush_notify(vma, addr, ptep);
+		entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
 		/*
 		 * Check that no O_DIRECT or similar I/O is in progress on the
 		 * page
 		 */
 		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-			set_pte_at(mm, addr, ptep, entry);
+			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 			goto out_unlock;
 		}
 		if (pte_dirty(entry))
 			set_page_dirty(page);
 		entry = pte_mkclean(pte_wrprotect(entry));
-		set_pte_at_notify(mm, addr, ptep, entry);
+		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
 	}
-	*orig_pte = *ptep;
+	*orig_pte = *pvmw.pte;
 	err = 0;
 
 out_unlock:
-	pte_unmap_unlock(ptep, ptl);
+	page_vma_mapped_walk_done(&pvmw);
 out_mn:
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
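
One detail worth noting about the new WARN_ONCE: page_vma_mapped_walk() can
also match a page that is mapped by a PMD (a transparent huge page), in which
case it returns with pvmw.pte == NULL and pvmw.pmd set. KSM only ever works on
small pages (hence the BUG_ON(PageTransCompound(page)) earlier in the
function), so a PMD match here would be a bug; the check turns that impossible
case into a loud, recoverable failure instead of a NULL pvmw.pte dereference.
A sketch of how a generic caller distinguishes the two cases, using the loop
form adopted by other callers converted in this series (the comments are
illustrative):

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* page is mapped by a PTE at pvmw.address */
		} else {
			/* PMD-mapped THP: pvmw.pmd is valid, pvmw.pte is NULL */
		}
		/* breaking out early requires page_vma_mapped_walk_done(&pvmw) */
	}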