Commit 84f0169e authored by David Hildenbrand, committed by Andrew Morton

mm/rmap: remove page_add_anon_rmap()

All users are gone, remove it and all traces.

Link: https://lkml.kernel.org/r/20231220224504.646757-22-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b832a354
@@ -239,8 +239,6 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
 		folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
 void folio_add_anon_rmap_pmd(struct folio *, struct page *,
 		struct vm_area_struct *, unsigned long address, rmap_t flags);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long address, rmap_t flags);
 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
 void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
...
@@ -1270,7 +1270,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
 	 * be set up correctly at this point.
 	 *
-	 * We have exclusion against page_add_anon_rmap because the caller
+	 * We have exclusion against folio_add_anon_rmap_*() because the caller
 	 * always holds the page locked.
 	 *
 	 * We have exclusion against folio_add_new_anon_rmap because those pages
@@ -1283,29 +1283,6 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 		       page);
 }
 
-/**
- * page_add_anon_rmap - add pte mapping to an anonymous page
- * @page:	the page to add the mapping to
- * @vma:	the vm area in which the mapping is added
- * @address:	the user virtual address mapped
- * @flags:	the rmap flags
- *
- * The caller needs to hold the pte lock, and the page must be locked in
- * the anon_vma case: to serialize mapping,index checking after setting,
- * and to ensure that PageAnon is not being upgraded racily to PageKsm
- * (but PageKsm is never downgraded to PageAnon).
- */
-void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
-		unsigned long address, rmap_t flags)
-{
-	struct folio *folio = page_folio(page);
-
-	if (likely(!(flags & RMAP_COMPOUND)))
-		folio_add_anon_rmap_pte(folio, page, vma, address, flags);
-	else
-		folio_add_anon_rmap_pmd(folio, page, vma, address, flags);
-}
-
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags, enum rmap_level level)
@@ -1419,7 +1396,7 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * Like page_add_anon_rmap() but must only be called on *new* folios.
+ * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
  * This means the inc-and-test can be bypassed.
  * The folio does not have to be locked.
  *
@@ -1479,7 +1456,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
 	if (nr)
 		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
 
-	/* See comments in page_add_anon_rmap() */
+	/* See comments in folio_add_anon_rmap_*() */
 	if (!folio_test_large(folio))
 		mlock_vma_folio(folio, vma);
 }
@@ -1593,7 +1570,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 
 	/*
 	 * It would be tidy to reset folio_test_anon mapping when fully
-	 * unmapped, but that might overwrite a racing page_add_anon_rmap
+	 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
 	 * which increments mapcount after us but sets mapping before us:
 	 * so leave the reset to free_pages_prepare, and remember that
 	 * it's only reliable while mapped.
...
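
For reference, a minimal sketch of how a former page_add_anon_rmap() caller maps onto the remaining folio API. The dispatch mirrors the wrapper deleted above; the wrapping function name below is hypothetical, while page_folio(), folio_add_anon_rmap_pte() and folio_add_anon_rmap_pmd() are the interfaces shown in this diff.

/*
 * Illustrative sketch only, not part of this commit: converting a call site
 * from the removed page_add_anon_rmap() wrapper to the folio_*() API.
 */
static void example_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	struct folio *folio = page_folio(page);

	/* Previously: page_add_anon_rmap(page, vma, address, flags); */
	if (likely(!(flags & RMAP_COMPOUND)))
		/* PTE-mapped: account a single page of the folio. */
		folio_add_anon_rmap_pte(folio, page, vma, address, flags);
	else
		/* PMD-mapped THP: account the whole folio at once. */
		folio_add_anon_rmap_pmd(folio, page, vma, address, flags);
}

In practice, converted call sites already know whether they are mapping at PTE or PMD granularity and can call the matching variant directly, which is why the wrapper could be dropped.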