Commit 9a5cc85c authored by Alistair Popple, committed by Linus Torvalds

mm/memory.c: allow different return codes for copy_nonpresent_pte()

Currently, if copy_nonpresent_pte() returns a non-zero value it is
assumed to be a swap entry which requires further processing outside the
loop in copy_pte_range() after dropping the locks.  This prevents other
values from being returned to signal conditions such as failure, which a
subsequent change requires.

Instead, make copy_nonpresent_pte() return an error code if further
processing is required, and read the value for the swap entry in the
main loop under the ptl.
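
As a rough illustration, here is a minimal, self-contained sketch of the
return code convention this patch introduces.  It is not kernel code:
the fake_copy_nonpresent() helper and the simplified swp_entry_t are
made-up stand-ins, and only the control flow mirrors the patch (return
an error code, read the entry while the lock is still notionally held,
then handle it after the locks are dropped):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's swp_entry_t. */
	typedef struct { unsigned long val; } swp_entry_t;

	/*
	 * Returns 0 on success, or -EIO when the caller must do further
	 * processing (in the kernel: adding a swap count continuation).
	 */
	static int fake_copy_nonpresent(unsigned long pte)
	{
		if (pte & 1)	/* pretend odd values need extra work */
			return -EIO;
		return 0;
	}

	int main(void)
	{
		unsigned long ptes[] = { 2, 4, 5, 6 };
		swp_entry_t entry = { .val = 0 };
		int ret = 0;
		size_t i;

		/* This loop stands in for the section run under the ptl. */
		for (i = 0; i < sizeof(ptes) / sizeof(ptes[0]); i++) {
			ret = fake_copy_nonpresent(ptes[i]);
			if (ret == -EIO) {
				/* Read the entry before leaving the loop. */
				entry.val = ptes[i];
				break;
			}
		}

		/* After "dropping the locks", act on the saved entry. */
		if (ret == -EIO)
			printf("continue processing entry %lu\n", entry.val);

		return 0;
	}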

Link: https://lkml.kernel.org/r/20210616105937.23201-7-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6b49bf6d
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -717,7 +717,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	if (likely(!non_swap_entry(entry))) {
 		if (swap_duplicate(entry) < 0)
-			return entry.val;
+			return -EIO;
 
 		/* make sure dst_mm is on swapoff's mmlist. */
 		if (unlikely(list_empty(&dst_mm->mmlist))) {
@@ -973,12 +973,14 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			continue;
 		}
 		if (unlikely(!pte_present(*src_pte))) {
-			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
-							dst_pte, src_pte,
-							dst_vma, src_vma,
-							addr, rss);
-			if (entry.val)
+			ret = copy_nonpresent_pte(dst_mm, src_mm,
+						  dst_pte, src_pte,
+						  dst_vma, src_vma,
+						  addr, rss);
+			if (ret == -EIO) {
+				entry = pte_to_swp_entry(*src_pte);
 				break;
+			}
 			progress += 8;
 			continue;
 		}
@@ -1011,20 +1013,24 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
 
-	if (entry.val) {
+	if (ret == -EIO) {
+		VM_WARN_ON_ONCE(!entry.val);
 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
 			ret = -ENOMEM;
 			goto out;
 		}
 		entry.val = 0;
-	} else if (ret) {
-		WARN_ON_ONCE(ret != -EAGAIN);
+	} else if (ret == -EAGAIN) {
 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
 		if (!prealloc)
 			return -ENOMEM;
-		/* We've captured and resolved the error. Reset, try again. */
-		ret = 0;
+	} else if (ret) {
+		VM_WARN_ON_ONCE(1);
 	}
+
+	/* We've captured and resolved the error. Reset, try again. */
+	ret = 0;
+
 	if (addr != end)
 		goto again;
 out: