Commit 7efecffb authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: remove mlock_vma_page()

All callers now have a folio and can call mlock_vma_folio().  Update the
documentation to refer to mlock_vma_folio().

Link: https://lkml.kernel.org/r/20230116192827.2146732-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 90c9d13a
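The conversion this series applies to callers is mechanical: derive the folio
from the page once with page_folio(), then use the folio API directly. A
minimal sketch, with example_add_rmap() as a hypothetical caller (not part of
this commit):

static void example_add_rmap(struct page *page,
		struct vm_area_struct *vma, bool compound)
{
	struct folio *folio = page_folio(page);	/* page -> folio, once */

	/* ... rmap bookkeeping elided ... */
	mlock_vma_folio(folio, vma, compound);	/* was: mlock_vma_page(page, ...) */
}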
@@ -311,7 +311,7 @@ do end up getting faulted into this VM_LOCKED VMA, they will be handled in the
fault path - which is also how mlock2()'s MLOCK_ONFAULT areas are handled.
For each PTE (or PMD) being faulted into a VMA, the page add rmap function
-calls mlock_vma_page(), which calls mlock_folio() when the VMA is VM_LOCKED
+calls mlock_vma_folio(), which calls mlock_folio() when the VMA is VM_LOCKED
(unless it is a PTE mapping of a part of a transparent huge page). Or when
it is a newly allocated anonymous page, folio_add_lru_vma() calls
mlock_new_folio() instead: similar to mlock_folio(), but can make better
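For reference, the VM_LOCKED gate this paragraph describes reads roughly like
the mm/internal.h inline of this era (a sketch with the long comment trimmed,
not the full source):

static inline void mlock_vma_folio(struct folio *folio,
		struct vm_area_struct *vma, bool compound)
{
	/*
	 * VM_SPECIAL (which includes VM_IO) suppresses mlock here; see the
	 * VM_IO trick in mlock_vma_pages_range() further down.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

The (compound || !folio_test_large(folio)) test implements the "unless it is a
PTE mapping of a part of a transparent huge page" exception above.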
@@ -413,7 +413,7 @@ However, since mlock_vma_pages_range() starts by setting VM_LOCKED on a VMA,
before mlocking any pages already present, if one of those pages were migrated
before mlock_pte_range() reached it, it would get counted twice in mlock_count.
To prevent that, mlock_vma_pages_range() temporarily marks the VMA as VM_IO,
-so that mlock_vma_page() will skip it.
+so that mlock_vma_folio() will skip it.
To complete page migration, we place the old and new pages back onto the LRU
afterwards. The "unneeded" page - old page on success, new page on failure -
@@ -552,6 +552,6 @@ and node unevictable list.
rmap's folio_referenced_one(), called via vmscan's shrink_active_list() or
shrink_page_list(), and rmap's try_to_unmap_one() called via shrink_page_list(),
-check for (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_page()
+check for (3) pages still mapped into VM_LOCKED VMAs, and call mlock_vma_folio()
to correct them. Such pages are culled to the unevictable list when released
by the shrinker.
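A hedged sketch of that fixup, modelled on rmap's try_to_unmap_one(); the
helper name and the surrounding page-table walk are illustrative, not from
this commit:

static bool fixup_mlocked_folio(struct folio *folio,
		struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_LOCKED) {
		/* Restore the mlock which got missed */
		mlock_vma_folio(folio, vma, false);
		return false;	/* abandon the unmap; keep the folio mapped */
	}
	return true;
}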
@@ -518,7 +518,7 @@ extern long faultin_vma_page_range(struct vm_area_struct *vma,
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
unsigned long len);
/*
- * mlock_vma_page() and munlock_vma_page():
+ * mlock_vma_folio() and munlock_vma_folio():
* should be called with vma's mmap_lock held for read or write,
* under page table lock for the pte/pmd being added or removed.
*
@@ -547,12 +547,6 @@ static inline void mlock_vma_folio(struct folio *folio,
mlock_folio(folio);
}
-static inline void mlock_vma_page(struct page *page,
-		struct vm_area_struct *vma, bool compound)
-{
-	mlock_vma_folio(page_folio(page), vma, compound);
-}
void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
@@ -656,8 +650,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void mlock_vma_page(struct page *page,
-			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_folio(struct folio *folio) { }
@@ -370,9 +370,9 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
/*
* There is a slight chance that concurrent page migration,
* or page reclaim finding a page of this now-VM_LOCKED vma,
-	 * will call mlock_vma_page() and raise page's mlock_count:
+	 * will call mlock_vma_folio() and raise page's mlock_count:
* double counting, leaving the page unevictable indefinitely.
-	 * Communicate this danger to mlock_vma_page() with VM_IO,
+	 * Communicate this danger to mlock_vma_folio() with VM_IO,
* which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
* mmap_lock is held in write mode here, so this weird
* combination should not be visible to other mmap_lock users;
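Condensed, the save/mark/restore dance around the page walk reads roughly as
follows (a sketch of mlock_vma_pages_range() from this era; the mm_walk_ops
setup is elided):

	if (newflags & VM_LOCKED)
		newflags |= VM_IO;	/* make mlock_vma_folio() skip this vma */
	WRITE_ONCE(vma->vm_flags, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;	/* drop the temporary marker */
		WRITE_ONCE(vma->vm_flags, newflags);
	}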
@@ -1260,7 +1260,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
__page_check_anon_rmap(page, vma, address);
}
-	mlock_vma_page(page, vma, compound);
+	mlock_vma_folio(folio, vma, compound);
}
/**
@@ -1351,7 +1351,7 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
if (nr)
__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
-	mlock_vma_page(page, vma, compound);
+	mlock_vma_folio(folio, vma, compound);
}
/**
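Both rmap call sites already derive a folio at the top of the function, which
is what makes the page wrapper removable. Condensed shape (a sketch; the
mapcount and statistics accounting are elided):

void page_add_file_rmap(struct page *page,
		struct vm_area_struct *vma, bool compound)
{
	struct folio *folio = page_folio(page);	/* folio already in hand */

	/* ... mapcount and NR_FILE_MAPPED accounting elided ... */

	mlock_vma_folio(folio, vma, compound);	/* no page wrapper needed */
}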