Commit cd775580 authored by Baolin Wang, committed by Andrew Morton

mm: change to return bool for isolate_movable_page()

isolate_movable_page() can now only return 0 or -EBUSY, and no caller
cares about the specific negative value, so convert isolate_movable_page()
to return a boolean and make the movable-page isolation checks clearer.

No functional changes intended.

[akpm@linux-foundation.org: remove unneeded comment, per Matthew]
Link: https://lkml.kernel.org/r/cb877f73f4fff8d309611082ec740a7065b1ade0.1676424378.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9747b9e9
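
For illustration only (not part of the commit): a minimal sketch of how a caller's
check changes with the int-to-bool conversion. The helper name below is hypothetical;
only isolate_movable_page(), ISOLATE_UNEVICTABLE and the 0/-EBUSY vs. true/false
conventions come from the diff itself.

    /*
     * Hypothetical caller sketch, not taken from the commit.
     */
    static bool try_isolate_for_migration(struct page *page)
    {
            /* Before this commit: 0 meant success, -EBUSY meant failure,
             * so the caller had to compare against 0:
             *
             *      return isolate_movable_page(page, ISOLATE_UNEVICTABLE) == 0;
             */

            /* After this commit: the bool return can be used directly. */
            return isolate_movable_page(page, ISOLATE_UNEVICTABLE);
    }
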
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                 unsigned long private, enum migrate_mode mode, int reason,
                 unsigned int *ret_succeeded);
 extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src);
@@ -92,8 +92,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 static inline struct page *alloc_migration_target(struct page *page,
                 unsigned long private)
         { return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
-        { return -EBUSY; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+        { return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src)
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -976,7 +976,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                         locked = NULL;
                                 }
 
-                                if (!isolate_movable_page(page, mode))
+                                if (isolate_movable_page(page, mode))
                                         goto isolate_success;
                         }
 
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2515,8 +2515,8 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
                 if (lru)
                         isolated = isolate_lru_page(page);
                 else
-                        isolated = !isolate_movable_page(page,
-                                                         ISOLATE_UNEVICTABLE);
+                        isolated = isolate_movable_page(page,
+                                                        ISOLATE_UNEVICTABLE);
 
                 if (isolated) {
                         list_add(&page->lru, pagelist);
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                  * We can skip free pages. And we can deal with pages on
                  * LRU and non-lru movable pages.
                  */
-                if (PageLRU(page)) {
+                if (PageLRU(page))
                         isolated = isolate_lru_page(page);
-                        ret = isolated ? 0 : -EBUSY;
-                } else
-                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-                if (!ret) { /* Success */
+                else
+                        isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+                if (isolated) {
                         list_add_tail(&page->lru, &source);
                         if (!__PageMovable(page))
                                 inc_node_page_state(page, NR_ISOLATED_ANON +
                                                     page_is_file_lru(page));
 
                 } else {
+                        ret = -EBUSY;
                         if (__ratelimit(&migrate_rs)) {
                                 pr_warn("failed to isolate pfn %lx\n", pfn);
                                 dump_page(page, "isolation failed");
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -58,7 +58,7 @@
 
 #include "internal.h"
 
-int isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 {
         struct folio *folio = folio_get_nontail_page(page);
         const struct movable_operations *mops;
@@ -119,14 +119,14 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
         folio_set_isolated(folio);
         folio_unlock(folio);
 
-        return 0;
+        return true;
 
 out_no_isolated:
         folio_unlock(folio);
 out_putfolio:
         folio_put(folio);
 out:
-        return -EBUSY;
+        return false;
 }
 
 static void putback_movable_folio(struct folio *folio)