Commit eabafaaa authored by Kefeng Wang's avatar Kefeng Wang Committed by Andrew Morton

mm: convert to should_zap_page() to should_zap_folio()

Make should_zap_page() take a folio and rename it to should_zap_folio() as
preparation for converting mm counter functions to take a folio.  Saves a
call to compound_head() hidden inside PageAnon().

[wangkefeng.wang@huawei.com: fix used-uninitialized warning]
  Link: https://lkml.kernel.org/r/962a7993-fce9-4de8-85cd-25e290f25736@huawei.com
Link: https://lkml.kernel.org/r/20240111152429.3374566-9-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarMatthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 530c2a0d
@@ -1369,19 +1369,20 @@ static inline bool should_zap_cows(struct zap_details *details)
 	return details->even_cows;
 }

-/* Decides whether we should zap this page with the page pointer specified */
-static inline bool should_zap_page(struct zap_details *details, struct page *page)
+/* Decides whether we should zap this folio with the folio pointer specified */
+static inline bool should_zap_folio(struct zap_details *details,
+				    struct folio *folio)
 {
-	/* If we can make a decision without *page.. */
+	/* If we can make a decision without *folio.. */
 	if (should_zap_cows(details))
 		return true;

-	/* E.g. the caller passes NULL for the case of a zero page */
-	if (!page)
+	/* E.g. the caller passes NULL for the case of a zero folio */
+	if (!folio)
 		return true;

-	/* Otherwise we should only zap non-anon pages */
-	return !PageAnon(page);
+	/* Otherwise we should only zap non-anon folios */
+	return !folio_test_anon(folio);
 }

 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
@@ -1434,7 +1435,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = ptep_get(pte);
-		struct folio *folio;
+		struct folio *folio = NULL;
 		struct page *page;

 		if (pte_none(ptent))
@@ -1447,7 +1448,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			unsigned int delay_rmap;

 			page = vm_normal_page(vma, addr, ptent);
-			if (unlikely(!should_zap_page(details, page)))
+			if (page)
+				folio = page_folio(page);
+			if (unlikely(!should_zap_folio(details, folio)))
 				continue;
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
@@ -1460,7 +1464,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				continue;
 			}

-			folio = page_folio(page);
 			delay_rmap = 0;
 			if (!folio_test_anon(folio)) {
 				if (pte_dirty(ptent)) {
@@ -1492,7 +1495,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			   is_device_exclusive_entry(entry)) {
 			page = pfn_swap_entry_to_page(entry);
 			folio = page_folio(page);
-			if (unlikely(!should_zap_page(details, page)))
+			if (unlikely(!should_zap_folio(details, folio)))
 				continue;
 			/*
 			 * Both device private/exclusive mappings should only
@@ -1513,10 +1516,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		} else if (is_migration_entry(entry)) {
-			page = pfn_swap_entry_to_page(entry);
-			if (!should_zap_page(details, page))
+			folio = pfn_swap_entry_folio(entry);
+			if (!should_zap_folio(details, folio))
 				continue;
-			rss[mm_counter(page)]--;
+			rss[mm_counter(&folio->page)]--;
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment