Commit 97729534 authored by David Hildenbrand, committed by Andrew Morton

mm/ksm: page_add_anon_rmap() -> folio_add_anon_rmap_pte()

Let's convert replace_page().  While at it, perform some folio conversion.

Link: https://lkml.kernel.org/r/20231220224504.646757-19-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a15dc478
...@@ -1369,6 +1369,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, ...@@ -1369,6 +1369,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
static int replace_page(struct vm_area_struct *vma, struct page *page, static int replace_page(struct vm_area_struct *vma, struct page *page,
struct page *kpage, pte_t orig_pte) struct page *kpage, pte_t orig_pte)
{ {
struct folio *kfolio = page_folio(kpage);
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
struct folio *folio; struct folio *folio;
pmd_t *pmd; pmd_t *pmd;
...@@ -1408,15 +1409,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, ...@@ -1408,15 +1409,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
goto out_mn; goto out_mn;
} }
VM_BUG_ON_PAGE(PageAnonExclusive(page), page); VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
kfolio);
/* /*
* No need to check ksm_use_zero_pages here: we can only have a * No need to check ksm_use_zero_pages here: we can only have a
* zero_page here if ksm_use_zero_pages was enabled already. * zero_page here if ksm_use_zero_pages was enabled already.
*/ */
if (!is_zero_pfn(page_to_pfn(kpage))) { if (!is_zero_pfn(page_to_pfn(kpage))) {
get_page(kpage); folio_get(kfolio);
page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
newpte = mk_pte(kpage, vma->vm_page_prot); newpte = mk_pte(kpage, vma->vm_page_prot);
} else { } else {
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment