Commit 5b4bd90f authored by Matthew Wilcox (Oracle), committed by Andrew Morton

rmap: add folio parameter to __page_set_anon_rmap()

Avoid the compound_head() call in PageAnon() by passing in the folio that
all callers have.  Also save me from wondering whether page->mapping can
ever be overwritten on a tail page (I don't think it can, but I'm not 100%
sure).

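As background, a minimal userspace sketch of the difference (hypothetical, simplified stand-ins for the real definitions in include/linux/page-flags.h and include/linux/mm_types.h; field layouts and helper names here are illustrative only): PageAnon(page) must first resolve the head page via a compound_head()-style lookup, whereas folio_test_anon(folio) can test folio->mapping directly.

/*
 * Hypothetical, simplified stand-ins for the real kernel structures and
 * helpers; not the actual definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON	0x1UL

/* First two fields match so a head page can be treated as a folio. */
struct folio {
	unsigned long flags;
	void *mapping;			/* low bit tags an anon mapping */
};

struct page {
	unsigned long flags;
	void *mapping;			/* only meaningful on the head page */
	unsigned long compound_head;	/* tail pages: head address | 1 */
};

/* Roughly what a page_folio()/compound_head() lookup must do on every call. */
static struct folio *page_folio_sketch(struct page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)				/* tail page: chase the head pointer */
		return (struct folio *)(head - 1);
	return (struct folio *)page;		/* head page doubles as the folio */
}

static bool folio_test_anon_sketch(const struct folio *folio)
{
	return (unsigned long)folio->mapping & PAGE_MAPPING_ANON;
}

/* The page-based test pays for the head lookup before it can check anything. */
static bool PageAnon_sketch(struct page *page)
{
	return folio_test_anon_sketch(page_folio_sketch(page));
}

int main(void)
{
	struct page head = { .mapping = (void *)PAGE_MAPPING_ANON };
	struct page tail = { .compound_head = (unsigned long)&head | 1 };

	printf("head anon: %d, tail anon: %d, folio anon: %d\n",
	       PageAnon_sketch(&head), PageAnon_sketch(&tail),
	       folio_test_anon_sketch((struct folio *)&head));
	return 0;
}
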
Link: https://lkml.kernel.org/r/20230116192959.2147032-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e0650a41
@@ -1131,19 +1131,20 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page:	Page or Hugepage to add to rmap
+ * @folio:	Folio which contains page.
+ * @page:	Page to add to rmap.
  * @vma:	VM area to add page to.
  * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
  */
-static void __page_set_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
 
-	if (PageAnon(page))
+	if (folio_test_anon(folio))
 		goto out;
 
 	/*
@@ -1155,14 +1156,14 @@ static void __page_set_anon_rmap(struct page *page,
 		anon_vma = anon_vma->root;
 
 	/*
-	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
+	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
 	 * Make sure the compiler doesn't split the stores of anon_vma and
 	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
 	 * could mistake the mapping for a struct address_space and crash.
 	 */
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
-	page->index = linear_page_index(vma, address);
+	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
+	folio->index = linear_page_index(vma, address);
 out:
 	if (exclusive)
 		SetPageAnonExclusive(page);
@@ -1254,7 +1255,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	if (likely(!folio_test_ksm(folio))) {
 		/* address might be in next vma when migration races vma_adjust */
 		if (first)
-			__page_set_anon_rmap(page, vma, address,
+			__page_set_anon_rmap(folio, page, vma, address,
 					     !!(flags & RMAP_EXCLUSIVE));
 		else
 			__page_check_anon_rmap(page, vma, address);
@@ -1297,7 +1298,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(&folio->page, vma, address, 1);
+	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
 /**
@@ -2528,7 +2529,7 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 	if (first)
-		__page_set_anon_rmap(page, vma, address,
+		__page_set_anon_rmap(folio, page, vma, address,
 				     !!(flags & RMAP_EXCLUSIVE));
 }
 
@@ -2541,6 +2542,6 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(page, vma, address, 1);
+	__page_set_anon_rmap(folio, page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */