Commit a23f517b authored by Kefeng Wang, committed by Andrew Morton

mm: convert mm_counter() to take a folio

Now that all callers of mm_counter() have a folio, convert mm_counter() to
take a folio.  This saves a call to compound_head() hidden inside PageAnon().
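
The saving comes from how the page-flag helpers are layered: PageAnon() must
first resolve its page to a folio, and that lookup is where the compound_head()
call hides, while folio_test_anon() inspects the folio directly.  A rough
sketch of the two helpers (simplified from the page-flags header of this era,
not the verbatim definitions):

	/* Page variant: pays for a head-page lookup before the real check. */
	static __always_inline bool PageAnon(const struct page *page)
	{
		/* page_folio() resolves the folio via compound_head() */
		return folio_test_anon(page_folio(page));
	}

	/* Folio variant: just inspects folio->mapping, no lookup needed. */
	static __always_inline bool folio_test_anon(const struct folio *folio)
	{
		return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
	}

So a caller that already holds the folio can charge the right counter without
the extra indirection.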

Link: https://lkml.kernel.org/r/20240111152429.3374566-10-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent eabafaaa
@@ -723,7 +723,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 	else if (is_migration_entry(entry)) {
 		struct folio *folio = pfn_swap_entry_folio(entry);
 
-		dec_mm_counter(mm, mm_counter(&folio->page));
+		dec_mm_counter(mm, mm_counter(folio));
 	}
 	free_swap_and_cache(entry);
 }
@@ -2603,11 +2603,11 @@ static inline int mm_counter_file(struct page *page)
 	return MM_FILEPAGES;
 }
 
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
 {
-	if (PageAnon(page))
+	if (folio_test_anon(folio))
 		return MM_ANONPAGES;
-	return mm_counter_file(page);
+	return mm_counter_file(&folio->page);
 }
 
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
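
With the folio-taking definition in place, a caller that still only has a
struct page can resolve the folio explicitly before charging the counter.  A
hypothetical call site (not part of this patch, shown only to illustrate the
new interface):

	struct folio *folio = page_folio(page);	/* explicit head-page lookup */

	inc_mm_counter(mm, mm_counter(folio));	/* MM_ANONPAGES or a file counter */

This keeps the remaining compound_head() cost visible at the call site rather
than burying it inside PageAnon().
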
@@ -808,7 +808,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	} else if (is_migration_entry(entry)) {
 		folio = pfn_swap_entry_folio(entry);
 
-		rss[mm_counter(&folio->page)]++;
+		rss[mm_counter(folio)]++;
 
 		if (!is_readable_migration_entry(entry) &&
 		    is_cow_mapping(vm_flags)) {
@@ -840,7 +840,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * keep things as they are.
 		 */
 		folio_get(folio);
-		rss[mm_counter(page)]++;
+		rss[mm_counter(folio)]++;
 		/* Cannot fail as these pages cannot get pinned. */
 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
@@ -1476,7 +1476,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				if (pte_young(ptent) && likely(vma_has_recency(vma)))
 					folio_mark_accessed(folio);
 			}
-			rss[mm_counter(page)]--;
+			rss[mm_counter(folio)]--;
 			if (!delay_rmap) {
 				folio_remove_rmap_pte(folio, page, vma);
 				if (unlikely(page_mapcount(page) < 0))
@@ -1504,7 +1504,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			 * see zap_install_uffd_wp_if_needed().
 			 */
 			WARN_ON_ONCE(!vma_is_anonymous(vma));
-			rss[mm_counter(page)]--;
+			rss[mm_counter(folio)]--;
 			if (is_device_private_entry(entry))
 				folio_remove_rmap_pte(folio, page, vma);
 			folio_put(folio);
@@ -1519,7 +1519,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			folio = pfn_swap_entry_folio(entry);
 			if (!should_zap_folio(details, folio))
 				continue;
-			rss[mm_counter(&folio->page)]--;
+			rss[mm_counter(folio)]--;
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 		} else if (folio_test_anon(folio)) {
 			swp_entry_t entry = page_swap_entry(subpage);
 			pte_t swp_pte;
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 		} else {
 			swp_entry_t entry;
 			pte_t swp_pte;
@@ -124,7 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
 	 * Must happen after rmap, as mm_counter() checks mapping (via
 	 * PageAnon()), which is set by __page_set_anon_rmap().
 	 */
-	inc_mm_counter(dst_mm, mm_counter(page));
+	inc_mm_counter(dst_mm, mm_counter(folio));
 
 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 