Commit 9e16b7fb authored by Hugh Dickins, committed by Linus Torvalds

mm,ksm: swapoff might need to copy

Before establishing that KSM page migration was the cause of my
WARN_ON_ONCE(page_mapped(page))s, I suspected that they came from the
lack of a ksm_might_need_to_copy() in swapoff's unuse_pte() - which in
many respects is equivalent to faulting in a page.

In fact I've never caught that as the cause: but in theory it does at
least need the KSM_RUN_UNMERGE check in ksm_might_need_to_copy(), to
avoid bringing a KSM page back in when it's not supposed to be.
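For context, the check referred to here lives in ksm_might_need_to_copy() in
mm/ksm.c.  The sketch below is a simplified illustration, not the verbatim
kernel source of that era (the real function has further checks for ordinary
anon pages left behind in swap); it only shows the shape of the decision that
unuse_pte() now relies on: reuse the page unless KSM is in the middle of
KSM_RUN_UNMERGE, otherwise hand back a freshly allocated, locked private copy,
with NULL telling the caller to fail with -ENOMEM.

/*
 * Illustrative sketch only (assumes mm/ksm.c context, where ksm_run and
 * KSM_RUN_UNMERGE are defined); not the exact upstream implementation.
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *new_page;

	/*
	 * A merged (KSM) page may normally be mapped back as-is, but not
	 * while KSM_RUN_UNMERGE is tearing all merged pages apart.
	 */
	if (!PageKsm(page) || !(ksm_run & KSM_RUN_UNMERGE))
		return page;		/* safe to reuse the existing page */

	/* Otherwise give the caller a private, locked copy of the data. */
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);
		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__set_page_locked(new_page);
	}
	return new_page;		/* NULL means -ENOMEM for the caller */
}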

I intended to copy how it's done in do_swap_page(), but have a strong
aversion to how "swapcache" ends up being used there: rework it with
"page != swapcache".

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5117b3b8
@@ -874,11 +874,17 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct page *swapcache;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
+	swapcache = page;
+	page = ksm_might_need_to_copy(page, vma, addr);
+	if (unlikely(!page))
+		return -ENOMEM;
+
 	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
 					 GFP_KERNEL, &memcg)) {
 		ret = -ENOMEM;
@@ -897,7 +903,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
-	page_add_anon_rmap(page, vma, addr);
+	if (page == swapcache)
+		page_add_anon_rmap(page, vma, addr);
+	else /* ksm created a completely new copy */
+		page_add_new_anon_rmap(page, vma, addr);
 	mem_cgroup_commit_charge_swapin(page, memcg);
 	swap_free(entry);
 	/*
@@ -908,6 +917,10 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 out:
 	pte_unmap_unlock(pte, ptl);
 out_nolock:
+	if (page != swapcache) {
+		unlock_page(page);
+		put_page(page);
+	}
 	return ret;
 }