Commit 92df3a72 authored by Mel Gorman, committed by Linus Torvalds

mm: vmscan: throttle reclaim if encountering too many dirty pages under writeback

Workloads that are allocating frequently and writing files place a large
number of dirty pages on the LRU.  With use-once logic, it is possible for
them to reach the end of the LRU quickly, requiring the reclaimer to scan
further to find clean pages.  Ordinarily, processes that are dirtying memory
will get throttled by dirty balancing, but this is a global heuristic that
does not take into account that LRUs are maintained on a per-zone basis.
This can lead to a situation whereby reclaim scans heavily, skipping
over a large number of pages under writeback and recycling them around the
LRU, consuming CPU.

This patch checks how many of the pages isolated from the LRU were dirty
and under writeback.  If a sufficient percentage of them are under
writeback, the process will be throttled if a backing device or the zone is
congested.  Note that this applies whether the pages under writeback are
anonymous or file-backed, meaning that swapping is potentially throttled.
This is intentional: if the swap device is congested, scanning more pages
and dispatching more IO is not going to help matters.
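
Condensed from the diff below (all identifiers are the ones the patch itself
uses), the caller-side change in shrink_inactive_list() boils down to:

	unsigned long nr_dirty = 0, nr_writeback = 0;

	/* shrink_page_list() now reports, via two new out-parameters, how many
	 * of the isolated pages were dirty and how many were under writeback. */
	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
					&nr_dirty, &nr_writeback);

	/* If "enough" of the isolated pages are under writeback, sleep for up
	 * to HZ/10, but only if the zone or its backing device is congested. */
	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY - priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);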

The percentage that must be under writeback depends on the priority.  At
default priority, all of the isolated pages must be under writeback.  At
DEF_PRIORITY-1, 50% of them must be; at DEF_PRIORITY-2, 25%; and so on.
In other words, as pressure increases, the more likely it is that the
process will be throttled, allowing the flusher threads to make some
progress.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Johannes Weiner <jweiner@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f84f6e2b
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -751,7 +751,9 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
-				      int priority)
+				      int priority,
+				      unsigned long *ret_nr_dirty,
+				      unsigned long *ret_nr_writeback)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -759,6 +761,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	unsigned long nr_dirty = 0;
 	unsigned long nr_congested = 0;
 	unsigned long nr_reclaimed = 0;
+	unsigned long nr_writeback = 0;
 
 	cond_resched();
 
@@ -795,6 +798,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
 		if (PageWriteback(page)) {
+			nr_writeback++;
 			/*
 			 * Synchronous reclaim cannot queue pages for
 			 * writeback due to the possibility of stack overflow
@@ -1000,6 +1004,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
+	*ret_nr_dirty += nr_dirty;
+	*ret_nr_writeback += nr_writeback;
 	return nr_reclaimed;
 }
 
@@ -1460,6 +1466,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 	unsigned long nr_taken;
 	unsigned long nr_anon;
 	unsigned long nr_file;
+	unsigned long nr_dirty = 0;
+	unsigned long nr_writeback = 0;
 	isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1512,12 +1520,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
 	spin_unlock_irq(&zone->lru_lock);
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
+						&nr_dirty, &nr_writeback);
 
 	/* Check if we should syncronously wait for writeback */
 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
 		set_reclaim_mode(priority, sc, true);
-		nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
+		nr_reclaimed += shrink_page_list(&page_list, zone, sc,
+					priority, &nr_dirty, &nr_writeback);
 	}
 
 	local_irq_disable();
@@ -1527,6 +1537,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
 	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
 
+	/*
+	 * If reclaim is isolating dirty pages under writeback, it implies
+	 * that the long-lived page allocation rate is exceeding the page
+	 * laundering rate. Either the global limits are not being effective
+	 * at throttling processes due to the page distribution throughout
+	 * zones or there is heavy usage of a slow backing device. The
+	 * only option is to throttle from reclaim context which is not ideal
+	 * as there is no guarantee the dirtying process is throttled in the
+	 * same way balance_dirty_pages() manages.
+	 *
+	 * This scales the number of dirty pages that must be under writeback
+	 * before throttling depending on priority. It is a simple backoff
+	 * function that has the most effect in the range DEF_PRIORITY to
+	 * DEF_PRIORITY-2, which is the priority range in which reclaim is
+	 * considered to be in trouble.
+	 *
+	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
+	 * DEF_PRIORITY-1  50% must be PageWriteback
+	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
+	 * ...
+	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
+	 *                isolated page is PageWriteback
+	 */
+	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
+
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,