Commit b1a6f21e authored by Mel Gorman, committed by Linus Torvalds

mm: vmscan: stall page reclaim after a list of pages have been processed

Commit "mm: vmscan: Block kswapd if it is encountering pages under
writeback" blocks page reclaim if it encounters pages under writeback
marked for immediate reclaim.  It blocks while pages are still isolated
from the LRU which is unnecessary.  This patch defers the blocking until
after the isolated pages have been processed and tidies up some of the
comments.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e2be15f6
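To make the shape of the change easier to follow before reading the hunks, here is a small standalone sketch of the pattern the patch moves to. This is plain C, not kernel code: struct toy_page, stall() and shrink_toy_list() are made-up stand-ins for struct page, congestion_wait(BLK_RW_ASYNC, HZ/10) and shrink_page_list(). The old behaviour slept once per troublesome page while that page was still isolated from the LRU; the new behaviour only counts such pages and lets the caller stall a single time after the whole list has been processed.

/*
 * Standalone sketch (not kernel code) of the deferred-stall pattern:
 * count pages that hit the "under writeback and marked for immediate
 * reclaim" case during the list pass, then stall once afterwards.
 */
#include <stdio.h>
#include <unistd.h>

struct toy_page {
        int under_writeback;    /* PageWriteback() analogue */
        int reclaim_marked;     /* PageReclaim() analogue   */
};

/* Stand-in for the up-to-100ms congestion_wait(BLK_RW_ASYNC, HZ/10) sleep. */
static void stall(void)
{
        usleep(100 * 1000);
}

/*
 * Process an "isolated" list; return how many pages hit the immediate
 * reclaim case instead of stalling per page while they are isolated.
 */
static unsigned long shrink_toy_list(struct toy_page *pages, int n)
{
        unsigned long nr_immediate = 0;

        for (int i = 0; i < n; i++) {
                if (pages[i].under_writeback && pages[i].reclaim_marked) {
                        nr_immediate++; /* the old code would stall right here */
                        continue;       /* keep the page, move on */
                }
                /* ... normal reclaim work for the page would go here ... */
        }
        return nr_immediate;
}

int main(void)
{
        struct toy_page pages[] = { { 1, 1 }, { 0, 0 }, { 1, 1 } };
        unsigned long nr_immediate;

        nr_immediate = shrink_toy_list(pages, 3);

        /*
         * Caller-side decision, made once, after the whole list has been
         * processed (and, in the kernel, after the pages are back on the LRU).
         */
        if (nr_immediate)
                stall();

        printf("stalled once for %lu immediate-reclaim pages\n", nr_immediate);
        return 0;
}

In the real patch, the count travels back through the new ret_nr_immediate / nr_immediate parameters and the single stall happens in shrink_inactive_list(), as the last hunk below shows.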
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -697,6 +697,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                       enum ttu_flags ttu_flags,
                                       unsigned long *ret_nr_unqueued_dirty,
                                       unsigned long *ret_nr_writeback,
+                                      unsigned long *ret_nr_immediate,
                                       bool force_reclaim)
 {
         LIST_HEAD(ret_pages);
@@ -707,6 +708,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
         unsigned long nr_congested = 0;
         unsigned long nr_reclaimed = 0;
         unsigned long nr_writeback = 0;
+        unsigned long nr_immediate = 0;
 
         cond_resched();
 
@@ -773,8 +775,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                  *    IO can complete. Waiting on the page itself risks an
                  *    indefinite stall if it is impossible to writeback the
                  *    page due to IO error or disconnected storage so instead
-                 *    block for HZ/10 or until some IO completes then clear the
-                 *    ZONE_WRITEBACK flag to recheck if the condition exists.
+                 *    note that the LRU is being scanned too quickly and the
+                 *    caller can stall after page list has been processed.
                  *
                  * 2) Global reclaim encounters a page, memcg encounters a
                  *    page that is not marked for immediate reclaim or
@@ -804,10 +806,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         if (current_is_kswapd() &&
                             PageReclaim(page) &&
                             zone_is_reclaim_writeback(zone)) {
-                                unlock_page(page);
-                                congestion_wait(BLK_RW_ASYNC, HZ/10);
-                                zone_clear_flag(zone, ZONE_WRITEBACK);
-                                goto keep;
+                                nr_immediate++;
+                                goto keep_locked;
 
                         /* Case 2 above */
                         } else if (global_reclaim(sc) ||
@@ -1033,6 +1033,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
         mem_cgroup_uncharge_end();
         *ret_nr_unqueued_dirty += nr_unqueued_dirty;
         *ret_nr_writeback += nr_writeback;
+        *ret_nr_immediate += nr_immediate;
         return nr_reclaimed;
 }
 
@@ -1044,7 +1045,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
                 .priority = DEF_PRIORITY,
                 .may_unmap = 1,
         };
-        unsigned long ret, dummy1, dummy2;
+        unsigned long ret, dummy1, dummy2, dummy3;
         struct page *page, *next;
         LIST_HEAD(clean_pages);
 
@@ -1057,7 +1058,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 
         ret = shrink_page_list(&clean_pages, zone, &sc,
                         TTU_UNMAP|TTU_IGNORE_ACCESS,
-                        &dummy1, &dummy2, true);
+                        &dummy1, &dummy2, &dummy3, true);
         list_splice(&clean_pages, page_list);
         __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
         return ret;
@@ -1353,6 +1354,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         unsigned long nr_taken;
         unsigned long nr_unqueued_dirty = 0;
         unsigned long nr_writeback = 0;
+        unsigned long nr_immediate = 0;
         isolate_mode_t isolate_mode = 0;
         int file = is_file_lru(lru);
         struct zone *zone = lruvec_zone(lruvec);
@@ -1394,7 +1396,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                 return 0;
 
         nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-                                        &nr_unqueued_dirty, &nr_writeback, false);
+                                &nr_unqueued_dirty, &nr_writeback, &nr_immediate,
+                                false);
 
         spin_lock_irq(&zone->lru_lock);
 
@@ -1447,14 +1450,28 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         }
 
         /*
-         * Similarly, if many dirty pages are encountered that are not
-         * currently being written then flag that kswapd should start
-         * writing back pages and stall to give a chance for flushers
-         * to catch up.
+         * memcg will stall in page writeback so only consider forcibly
+         * stalling for global reclaim
          */
-        if (global_reclaim(sc) && nr_unqueued_dirty == nr_taken) {
-                congestion_wait(BLK_RW_ASYNC, HZ/10);
-                zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+        if (global_reclaim(sc)) {
+                /*
+                 * If dirty pages are scanned that are not queued for IO, it
+                 * implies that flushers are not keeping up. In this case, flag
+                 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+                 * pages from reclaim context. It will forcibly stall in the
+                 * next check.
+                 */
+                if (nr_unqueued_dirty == nr_taken)
+                        zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
+                /*
+                 * In addition, if kswapd scans pages marked for immediate
+                 * reclaim and under writeback (nr_immediate), it implies
+                 * that pages are cycling through the LRU faster than they
+                 * are written so also forcibly stall.
+                 */
+                if (nr_unqueued_dirty == nr_taken || nr_immediate)
+                        congestion_wait(BLK_RW_ASYNC, HZ/10);
         }
 
         trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,