Commit ee0800c2 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert page_add_anon_rmap() to use a folio internally

The API for page_add_anon_rmap() needs to be page-based, because we can
add mappings of individual pages.  But inside the function, we want to
only call compound_head() once and then use the folio APIs instead of the
page APIs that each call compound_head().

Link: https://lkml.kernel.org/r/20230111142915.1001531-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 62beb906
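
As a minimal sketch of the pattern this commit applies (illustration only, not code from the commit; the demo_*() helpers are hypothetical): page-based tests such as PageAnon() and PageKsm() each resolve the head page internally, while calling page_folio() once lets all subsequent folio_test_*() checks reuse that single lookup. The page-based function signature is kept so existing callers that map individual pages are unchanged.

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Before: each page-based test hides its own head-page lookup. */
static bool demo_page_is_plain_anon(struct page *page)
{
        return PageAnon(page) && !PageKsm(page);
}

/* After: resolve the folio once, then use the folio-based tests. */
static bool demo_folio_is_plain_anon(struct page *page)
{
        struct folio *folio = page_folio(page);

        return folio_test_anon(folio) && !folio_test_ksm(folio);
}
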
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1207,10 +1207,11 @@ static void __page_check_anon_rmap(struct page *page,
  * and to ensure that PageAnon is not being upgraded racily to PageKsm
  * (but PageKsm is never downgraded to PageAnon).
  */
-void page_add_anon_rmap(struct page *page,
-        struct vm_area_struct *vma, unsigned long address, rmap_t flags)
+void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+                unsigned long address, rmap_t flags)
 {
-        atomic_t *mapped;
+        struct folio *folio = page_folio(page);
+        atomic_t *mapped = &folio->_nr_pages_mapped;
         int nr = 0, nr_pmdmapped = 0;
         bool compound = flags & RMAP_COMPOUND;
         bool first = true;
@@ -1219,20 +1220,18 @@ void page_add_anon_rmap(struct page *page,
 
         if (likely(!compound)) {
                 first = atomic_inc_and_test(&page->_mapcount);
                 nr = first;
-                if (first && PageCompound(page)) {
-                        mapped = subpages_mapcount_ptr(compound_head(page));
+                if (first && folio_test_large(folio)) {
                         nr = atomic_inc_return_relaxed(mapped);
                         nr = (nr < COMPOUND_MAPPED);
                 }
-        } else if (PageTransHuge(page)) {
+        } else if (folio_test_pmd_mappable(folio)) {
                 /* That test is redundant: it's for safety or to optimize out */
-                first = atomic_inc_and_test(compound_mapcount_ptr(page));
+                first = atomic_inc_and_test(&folio->_entire_mapcount);
                 if (first) {
-                        mapped = subpages_mapcount_ptr(page);
                         nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
                         if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-                                nr_pmdmapped = thp_nr_pages(page);
+                                nr_pmdmapped = folio_nr_pages(folio);
                                 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
                                 /* Raced ahead of a remove and another add? */
                                 if (unlikely(nr < 0))
@@ -1248,11 +1247,11 @@ void page_add_anon_rmap(struct page *page,
         VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 
         if (nr_pmdmapped)
-                __mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
+                __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
         if (nr)
-                __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
+                __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 
-        if (likely(!PageKsm(page))) {
+        if (likely(!folio_test_ksm(folio))) {
                 /* address might be in next vma when migration races vma_adjust */
                 if (first)
                         __page_set_anon_rmap(page, vma, address,
...