Commit 6b27cc6c authored by Kefeng Wang, committed by Andrew Morton

mm: convert mm_counter_file() to take a folio

Now that all callers of mm_counter_file() have a folio, convert
mm_counter_file() to take a folio.  This saves a call to compound_head()
hidden inside PageSwapBacked().

Link: https://lkml.kernel.org/r/20240111152429.3374566-11-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a23f517b
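
Aside (not part of the commit): the saving mentioned in the commit message comes from how the page-flag tests expand. PageSwapBacked(page) must first resolve the head page of a possibly-compound page via compound_head(), while folio_test_swapbacked(folio) can test folio->flags directly, since a folio is by definition never a tail page. A minimal sketch of the difference, with illustrative helper names (the kernel's real tests are generated by the PAGEFLAG macro machinery, not written out like this):

/*
 * Illustrative sketch only -- the sketch_* helpers are not kernel code;
 * the real tests are generated by the PAGEFLAG machinery.
 */
static inline bool sketch_page_swapbacked(struct page *page)
{
        /* compound_head() is the extra work the conversion avoids */
        return test_bit(PG_swapbacked, &compound_head(page)->flags);
}

static inline bool sketch_folio_swapbacked(struct folio *folio)
{
        /* a folio is always a head page, so test its flags directly */
        return test_bit(PG_swapbacked, &folio->flags);
}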
include/linux/mm.h
@@ -2595,10 +2595,10 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member)
 	mm_trace_rss_stat(mm, member);
 }
 
-/* Optimized variant when page is already known not to be PageAnon */
-static inline int mm_counter_file(struct page *page)
+/* Optimized variant when folio is already known not to be anon */
+static inline int mm_counter_file(struct folio *folio)
 {
-	if (PageSwapBacked(page))
+	if (folio_test_swapbacked(folio))
 		return MM_SHMEMPAGES;
 	return MM_FILEPAGES;
 }
@@ -2607,7 +2607,7 @@ static inline int mm_counter(struct folio *folio)
 {
 	if (folio_test_anon(folio))
 		return MM_ANONPAGES;
-	return mm_counter_file(&folio->page);
+	return mm_counter_file(folio);
 }
 
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
kernel/events/uprobes.c
@@ -188,7 +188,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 		dec_mm_counter(mm, MM_ANONPAGES);
 
 	if (!folio_test_anon(old_folio)) {
-		dec_mm_counter(mm, mm_counter_file(old_page));
+		dec_mm_counter(mm, mm_counter_file(old_folio));
 		inc_mm_counter(mm, MM_ANONPAGES);
 	}
mm/huge_memory.c
@@ -1931,7 +1931,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		} else {
 			if (arch_needs_pgtable_deposit())
 				zap_deposited_table(tlb->mm, pmd);
-			add_mm_counter(tlb->mm, mm_counter_file(&folio->page),
+			add_mm_counter(tlb->mm, mm_counter_file(folio),
 				       -HPAGE_PMD_NR);
 		}
@@ -2456,7 +2456,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			folio_remove_rmap_pmd(folio, page, vma);
 			folio_put(folio);
 		}
-		add_mm_counter(mm, mm_counter_file(&folio->page), -HPAGE_PMD_NR);
+		add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
 		return;
 	}
mm/khugepaged.c
@@ -1634,7 +1634,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	/* step 3: set proper refcount and mm_counters. */
 	if (nr_ptes) {
 		folio_ref_sub(folio, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
+		add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
 	}
 
 	/* step 4: remove empty page table */
@@ -1665,7 +1665,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	if (nr_ptes) {
 		flush_tlb_mm(mm);
 		folio_ref_sub(folio, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
+		add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
 	}
 	if (start_pte)
 		pte_unmap_unlock(start_pte, ptl);
mm/memory.c
@@ -966,7 +966,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	} else if (page) {
 		folio_get(folio);
 		folio_dup_file_rmap_pte(folio, page);
-		rss[mm_counter_file(page)]++;
+		rss[mm_counter_file(folio)]++;
 	}
 
 	/*
@@ -1873,7 +1873,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
 		return -EBUSY;
 	/* Ok, finally just insert the thing.. */
 	folio_get(folio);
-	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
+	inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
 	folio_add_file_rmap_pte(folio, page, vma);
 	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
 	return 0;
@@ -3178,7 +3178,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
 		if (old_folio) {
 			if (!folio_test_anon(old_folio)) {
-				dec_mm_counter(mm, mm_counter_file(&old_folio->page));
+				dec_mm_counter(mm, mm_counter_file(old_folio));
 				inc_mm_counter(mm, MM_ANONPAGES);
 			}
 		} else {
@@ -4483,7 +4483,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	if (write)
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
-	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
+	add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
 	folio_add_file_rmap_pmd(folio, page, vma);
 
 	/*
@@ -4546,7 +4546,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 		folio_add_new_anon_rmap(folio, vma, addr);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
 		folio_add_file_rmap_ptes(folio, page, nr, vma);
 	}
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
mm/rmap.c
@@ -1903,7 +1903,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 *
 			 * See Documentation/mm/mmu_notifier.rst
 			 */
-			dec_mm_counter(mm, mm_counter_file(&folio->page));
+			dec_mm_counter(mm, mm_counter_file(folio));
 		}
 discard:
 		if (unlikely(folio_test_hugetlb(folio)))