Commit c2135f7c authored by Alex Shi, committed by Linus Torvalds

mm/vmscan: __isolate_lru_page_prepare() cleanup

The function returns only two results, so using a 'switch' to handle its
result is unnecessary.  Also simplify it to return bool, as Vlastimil
suggested.

Also remove the 'goto' by reusing list_move(), and take Matthew Wilcox's
suggestion to update the comments in the function.

Link: https://lkml.kernel.org/r/728874d7-2d93-4049-68c1-dcc3b2d52ccd@linux.alibaba.com
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7ecc9565
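
The cleanup follows a common pattern: a helper that can only succeed or fail
gains nothing from returning 0 or a negative errno, and its callers gain
nothing from a switch over that value.  Below is a minimal self-contained
sketch of the same transformation, with hypothetical struct item /
item_prepare() / take_ready() names standing in for the kernel API:

#include <stdbool.h>
#include <stddef.h>

struct item {
	int on_list;
	int busy;
};

/*
 * Before: the predicate reported success as 0 and failure as a
 * negative errno, although callers only ever needed yes or no.
 */
static int item_prepare_old(const struct item *it)
{
	int ret = -16;		/* stand-in for -EBUSY */

	if (!it->on_list)
		return ret;
	if (it->busy)
		return ret;
	return 0;
}

/* After: a bool expresses exactly the two possible outcomes. */
static bool item_prepare(const struct item *it)
{
	if (!it->on_list)
		return false;
	if (it->busy)
		return false;
	return true;
}

/*
 * A caller in the post-cleanup style: each failure path handles the
 * item and continues the loop, so no 'busy:' label and no goto are
 * needed (mirroring the isolate_lru_pages() hunk below).
 */
static size_t take_ready(const struct item *items, size_t n)
{
	size_t i, taken = 0;

	for (i = 0; i < n; i++) {
		if (!item_prepare(&items[i]))
			continue;	/* leave the item where it is */
		taken++;
	}
	return taken;
}

The diff below applies the same idea to __isolate_lru_page_prepare() and its
two callers.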
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  unsigned long nr_pages,
 						  gfp_t gfp_mask,
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -988,7 +988,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			if (unlikely(!get_page_unless_zero(page)))
 				goto isolate_fail;
 
-			if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+			if (!__isolate_lru_page_prepare(page, isolate_mode))
 				goto isolate_fail_put;
 
 			/* Try isolate the page */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1539,19 +1539,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
  * page:	page to consider
  * mode:	one of the LRU isolation modes defined above
  *
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
  */
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 {
-	int ret = -EBUSY;
-
 	/* Only take pages on the LRU. */
 	if (!PageLRU(page))
-		return ret;
+		return false;
 
 	/* Compaction should not handle unevictable pages but CMA can do so */
 	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-		return ret;
+		return false;
 
 	/*
 	 * To minimise LRU disruption, the caller can indicate that it only
...@@ -1564,7 +1562,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode) ...@@ -1564,7 +1562,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
if (mode & ISOLATE_ASYNC_MIGRATE) { if (mode & ISOLATE_ASYNC_MIGRATE) {
/* All the caller can do on PageWriteback is block */ /* All the caller can do on PageWriteback is block */
if (PageWriteback(page)) if (PageWriteback(page))
return ret; return false;
if (PageDirty(page)) { if (PageDirty(page)) {
struct address_space *mapping; struct address_space *mapping;
@@ -1580,20 +1578,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 			 * from the page cache.
 			 */
 			if (!trylock_page(page))
-				return ret;
+				return false;
 
 			mapping = page_mapping(page);
 			migrate_dirty = !mapping || mapping->a_ops->migratepage;
 			unlock_page(page);
 			if (!migrate_dirty)
-				return ret;
+				return false;
 		}
 	}
 
 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-		return ret;
+		return false;
 
-	return 0;
+	return true;
 }
 
 /*
@@ -1677,35 +1675,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		 * only when the page is being freed somewhere else.
 		 */
 		scan += nr_pages;
-		switch (__isolate_lru_page_prepare(page, mode)) {
-		case 0:
-			/*
-			 * Be careful not to clear PageLRU until after we're
-			 * sure the page is not being freed elsewhere -- the
-			 * page release code relies on it.
-			 */
-			if (unlikely(!get_page_unless_zero(page)))
-				goto busy;
-
-			if (!TestClearPageLRU(page)) {
-				/*
-				 * This page may in other isolation path,
-				 * but we still hold lru_lock.
-				 */
-				put_page(page);
-				goto busy;
-			}
-
-			nr_taken += nr_pages;
-			nr_zone_taken[page_zonenum(page)] += nr_pages;
-			list_move(&page->lru, dst);
-			break;
-
-		default:
-busy:
-			/* else it is being freed elsewhere */
-			list_move(&page->lru, src);
-		}
+		if (!__isolate_lru_page_prepare(page, mode)) {
+			/* It is being freed elsewhere */
+			list_move(&page->lru, src);
+			continue;
+		}
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		if (unlikely(!get_page_unless_zero(page))) {
+			list_move(&page->lru, src);
+			continue;
+		}
+
+		if (!TestClearPageLRU(page)) {
+			/* Another thread is already isolating this page */
+			put_page(page);
+			list_move(&page->lru, src);
+			continue;
+		}
+
+		nr_taken += nr_pages;
+		nr_zone_taken[page_zonenum(page)] += nr_pages;
+		list_move(&page->lru, dst);
 	}
 
 	/*