Commit f7f9c00d authored by Baolin Wang, committed by Andrew Morton

mm: change to return bool for isolate_lru_page()

isolate_lru_page() can only return 0 or -EBUSY, and most callers do not
care about the specific negative error value; the one exception is
add_page_for_migration().  So we can convert isolate_lru_page() to return
a boolean value instead, which makes the code clearer when checking its
return value.

Also convert all callers' checks of the isolation result accordingly.

No functional changes intended.

Link: https://lkml.kernel.org/r/3074c1ab628d9dbf139b33f248a8bc253a3f95f0.1676424378.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent be2d5756
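
The shape of the conversion, as a minimal stand-alone C sketch (the isolate_old()/isolate_new() helpers and the lru_ok flag are hypothetical; only the 0/-EBUSY and true/false conventions come from the patch): an errno-style return becomes true/false, every caller's success test flips polarity, and a caller that still needs an error code maps false back to -EBUSY by hand.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Old convention: 0 on success, -EBUSY on failure. */
static int isolate_old(bool lru_ok)
{
	return lru_ok ? 0 : -EBUSY;
}

/* New convention: true on success, false on failure. */
static bool isolate_new(bool lru_ok)
{
	return lru_ok;
}

int main(void)
{
	bool lru_ok = false;	/* pretend the page could not be isolated */
	int err;

	/* Old caller: any nonzero return meant failure ... */
	if (isolate_old(lru_ok))
		printf("old: isolation failed\n");

	/* ... so the converted caller's test inverts its polarity. */
	if (!isolate_new(lru_ok))
		printf("new: isolation failed\n");

	/* A caller that must report an errno translates false explicitly. */
	err = isolate_new(lru_ok) ? 0 : -EBUSY;
	printf("err = %d\n", err);
	return 0;
}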
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -113,17 +113,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
 
-int isolate_lru_page(struct page *page)
+bool isolate_lru_page(struct page *page)
 {
-	bool ret;
-
 	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
-		return -EBUSY;
-	ret = folio_isolate_lru((struct folio *)page);
-	if (ret)
-		return 0;
-
-	return -EBUSY;
+		return false;
+	return folio_isolate_lru((struct folio *)page);
 }
 
 void putback_lru_page(struct page *page)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -187,7 +187,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 /*
  * in mm/vmscan.c:
  */
-int isolate_lru_page(struct page *page);
+bool isolate_lru_page(struct page *page);
 bool folio_isolate_lru(struct folio *folio);
 void putback_lru_page(struct page *page);
 void folio_putback_lru(struct folio *folio);
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -636,7 +636,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 * Isolate the page to avoid collapsing an hugepage
 		 * currently in use by the VM.
 		 */
-		if (isolate_lru_page(page)) {
+		if (!isolate_lru_page(page)) {
 			unlock_page(page);
 			result = SCAN_DEL_PAGE_LRU;
 			goto out;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6176,7 +6176,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
 		if (target_type == MC_TARGET_PAGE) {
 			page = target.page;
-			if (!isolate_lru_page(page)) {
+			if (isolate_lru_page(page)) {
 				if (!mem_cgroup_move_account(page, true,
 							     mc.from, mc.to)) {
 					mc.precharge -= HPAGE_PMD_NR;
@@ -6226,7 +6226,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 			 */
 			if (PageTransCompound(page))
 				goto put;
-			if (!device && isolate_lru_page(page))
+			if (!device && !isolate_lru_page(page))
 				goto put;
 			if (!mem_cgroup_move_account(page, false,
 						     mc.from, mc.to)) {
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -846,7 +846,7 @@ static const char * const action_page_types[] = {
  */
 static int delete_from_lru_cache(struct page *p)
 {
-	if (!isolate_lru_page(p)) {
+	if (isolate_lru_page(p)) {
 		/*
 		 * Clear sensible page flags, so that the buddy system won't
 		 * complain when the page is unpoison-and-freed.
@@ -2513,7 +2513,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
 	bool lru = !__PageMovable(page);
 
 	if (lru)
-		isolated = !isolate_lru_page(page);
+		isolated = isolate_lru_page(page);
 	else
 		isolated = !isolate_movable_page(page,
 						 ISOLATE_UNEVICTABLE);
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1632,6 +1632,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		struct folio *folio;
+		bool isolated;
 
 		if (!pfn_valid(pfn))
 			continue;
@@ -1667,9 +1668,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * We can skip free pages. And we can deal with pages on
 		 * LRU and non-lru movable pages.
 		 */
-		if (PageLRU(page))
-			ret = isolate_lru_page(page);
-		else
+		if (PageLRU(page)) {
+			isolated = isolate_lru_page(page);
+			ret = isolated ? 0 : -EBUSY;
+		} else
 			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
 		if (!ret) { /* Success */
 			list_add_tail(&page->lru, &source);
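
do_migrate_range() still needs an int error code here because its non-LRU path, isolate_movable_page(), keeps the 0/-EBUSY convention (note the "if (!ret)" success check), so the new bool result is bridged with "isolated ? 0 : -EBUSY".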
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2132,11 +2132,14 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 		}
 	} else {
 		struct page *head;
+		bool isolated;
 
 		head = compound_head(page);
-		err = isolate_lru_page(head);
-		if (err)
+		isolated = isolate_lru_page(head);
+		if (!isolated) {
+			err = -EBUSY;
 			goto out_putpage;
+		}
 
 		err = 1;
 		list_add_tail(&head->lru, pagelist);
@@ -2541,7 +2544,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 		return 0;
 	}
 
-	if (isolate_lru_page(page))
+	if (!isolate_lru_page(page))
 		return 0;
 
 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
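
add_page_for_migration() is the one caller the commit message singles out: it reports failures to user space via the move_pages(2) path, so a false result is translated back into an explicit -EBUSY rather than being dropped.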
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -388,7 +388,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
 				allow_drain = false;
 			}
 
-			if (isolate_lru_page(page)) {
+			if (!isolate_lru_page(page)) {
 				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
 				restore++;
 				continue;