mm: Turn isolate_lru_page() into folio_isolate_lru()

Add isolate_lru_page() as a wrapper around folio_isolate_lru().
TestClearPageLRU() would have always failed on a tail page, so
returning -EBUSY is the same behaviour.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent 659508f9
......@@ -21,7 +21,6 @@ extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
unsigned long ua, unsigned long entries,
......
......@@ -7,6 +7,7 @@
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include "internal.h"
struct address_space *page_mapping(struct page *page)
{
......@@ -151,3 +152,10 @@ int try_to_release_page(struct page *page, gfp_t gfp)
return filemap_release_folio(page_folio(page), gfp);
}
EXPORT_SYMBOL(try_to_release_page);
/*
 * isolate_lru_page() - Legacy page-based wrapper around folio_isolate_lru().
 *
 * Tail pages are rejected up front with -EBUSY; per the commit message,
 * TestClearPageLRU() would always have failed on a tail page, so this
 * preserves the old behaviour while warning (rate-limited) about misuse.
 *
 * The direct cast to struct folio * is safe only because the guard above
 * ensures @page is not a tail page, i.e. it is a head (or order-0) page
 * and therefore is its own folio. Callers must hold a reference on the
 * page, as required by folio_isolate_lru().
 */
int isolate_lru_page(struct page *page)
{
/* Tail pages are never individually on an LRU list; warn and bail. */
if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
return -EBUSY;
return folio_isolate_lru((struct folio *)page);
}
......@@ -152,7 +152,8 @@ extern unsigned long highest_memmap_pfn;
/*
* in mm/vmscan.c:
*/
extern int isolate_lru_page(struct page *page);
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
extern void putback_lru_page(struct page *page);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
......
......@@ -2211,45 +2211,40 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
}
/**
* isolate_lru_page - tries to isolate a page from its LRU list
* @page: page to isolate from its LRU list
* folio_isolate_lru() - Try to isolate a folio from its LRU list.
* @folio: Folio to isolate from its LRU list.
*
* Isolates a @page from an LRU list, clears PageLRU and adjusts the
* vmstat statistic corresponding to whatever LRU list the page was on.
* Isolate a @folio from an LRU list and adjust the vmstat statistic
* corresponding to whatever LRU list the folio was on.
*
* Returns 0 if the page was removed from an LRU list.
* Returns -EBUSY if the page was not on an LRU list.
*
* The returned page will have PageLRU() cleared. If it was found on
* the active list, it will have PageActive set. If it was found on
* the unevictable list, it will have the PageUnevictable bit set. That flag
* The folio will have its LRU flag cleared. If it was found on the
* active list, it will have the Active flag set. If it was found on the
* unevictable list, it will have the Unevictable flag set. These flags
* may need to be cleared by the caller before letting the page go.
*
* The vmstat statistic corresponding to the list on which the page was
* found will be decremented.
*
* Restrictions:
* Context:
*
* (1) Must be called with an elevated refcount on the page. This is a
* fundamental difference from isolate_lru_pages (which is called
* fundamental difference from isolate_lru_pages() (which is called
* without a stable reference).
* (2) the lru_lock must not be held.
* (3) interrupts must be enabled.
* (2) The lru_lock must not be held.
* (3) Interrupts must be enabled.
*
* Return: 0 if the folio was removed from an LRU list.
* -EBUSY if the folio was not on an LRU list.
*/
int isolate_lru_page(struct page *page)
int folio_isolate_lru(struct folio *folio)
{
struct folio *folio = page_folio(page);
int ret = -EBUSY;
VM_BUG_ON_PAGE(!page_count(page), page);
WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
if (TestClearPageLRU(page)) {
if (folio_test_clear_lru(folio)) {
struct lruvec *lruvec;
get_page(page);
folio_get(folio);
lruvec = folio_lruvec_lock_irq(folio);
del_page_from_lru_list(page, lruvec);
lruvec_del_folio(lruvec, folio);
unlock_page_lruvec_irq(lruvec);
ret = 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment