Commit 2b635dd3 authored by Will Deacon

mm: Avoid modifying vmf.address in __collapse_huge_page_swapin()

In preparation for const-ifying the anonymous struct field of
'struct vm_fault', rework __collapse_huge_page_swapin() to avoid
continuously updating vmf.address and instead populate a new
'struct vm_fault' on the stack for each page being processed.

Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will@kernel.org>
parent 9d3af4b4
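
For context only (not part of the commit): a minimal userspace sketch of the pattern the patch moves to, using a hypothetical 'struct fake_fault' in place of the kernel's 'struct vm_fault'. Initialising a fresh structure on the stack for each page inside the loop, rather than advancing one long-lived structure's address field, is what allows that field to be treated as const after construction.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_PMD_NR	512

/* Hypothetical stand-in for the anonymous struct inside 'struct vm_fault'. */
struct fake_fault {
	const unsigned long address;	/* never modified after initialisation */
	unsigned long pgoff;
};

static void fake_swap_page(const struct fake_fault *ff)
{
	printf("faulting address %#lx (pgoff %lu)\n", ff->address, ff->pgoff);
}

int main(void)
{
	unsigned long haddr = 0x200000;	/* pretend PMD-aligned base address */
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	/* One fresh fault descriptor per page, mirroring the reworked loop. */
	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct fake_fault ff = {
			.address = address,
			.pgoff = address / PAGE_SIZE,
		};
		fake_swap_page(&ff);
	}
	return 0;
}
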
@@ -991,38 +991,41 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 					struct vm_area_struct *vma,
-					unsigned long address, pmd_t *pmd,
+					unsigned long haddr, pmd_t *pmd,
 					int referenced)
 {
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
-	struct vm_fault vmf = {
-		.vma = vma,
-		.address = address,
-		.flags = FAULT_FLAG_ALLOW_RETRY,
-		.pmd = pmd,
-		.pgoff = linear_page_index(vma, address),
-	};
-
-	vmf.pte = pte_offset_map(pmd, address);
-	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
-			vmf.pte++, vmf.address += PAGE_SIZE) {
-		vmf.orig_pte = *vmf.pte;
-		if (!is_swap_pte(vmf.orig_pte))
-			continue;
-		swapped_in++;
-		ret = do_swap_page(&vmf);
-
-		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
-		if (ret & VM_FAULT_RETRY) {
-			mmap_read_lock(mm);
-			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
-				/* vma is no longer available, don't continue to swapin */
-				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-				return false;
-			}
-			/* check if the pmd is still valid */
-			if (mm_find_pmd(mm, address) != pmd) {
-				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-				return false;
-			}
+	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+
+	for (address = haddr; address < end; address += PAGE_SIZE) {
+		struct vm_fault vmf = {
+			.vma = vma,
+			.address = address,
+			.pgoff = linear_page_index(vma, haddr),
+			.flags = FAULT_FLAG_ALLOW_RETRY,
+			.pmd = pmd,
+		};
+
+		vmf.pte = pte_offset_map(pmd, address);
+		vmf.orig_pte = *vmf.pte;
+		if (!is_swap_pte(vmf.orig_pte)) {
+			pte_unmap(vmf.pte);
+			continue;
+		}
+		swapped_in++;
+		ret = do_swap_page(&vmf);
+
+		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
+		if (ret & VM_FAULT_RETRY) {
+			mmap_read_lock(mm);
+			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
+				/* vma is no longer available, don't continue to swapin */
+				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+				return false;
+			}
+			/* check if the pmd is still valid */
+			if (mm_find_pmd(mm, haddr) != pmd) {
+				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+				return false;
+			}
@@ -1031,11 +1034,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 			return false;
 		}
-		/* pte is unmapped now, we need to map it */
-		vmf.pte = pte_offset_map(pmd, vmf.address);
 	}
-	vmf.pte--;
-	pte_unmap(vmf.pte);
 
 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
 	if (swapped_in)
...