Commit d43006d5 authored by Mel Gorman, committed by Linus Torvalds

mm: vmscan: have kswapd writeback pages based on dirty pages encountered, not priority

Currently kswapd queues dirty pages for writeback when scanning at an
elevated priority, but the priority kswapd scans at is not related to the
number of unqueued dirty pages encountered.  Since commit "mm: vmscan: Flatten
kswapd priority loop", the priority is related to the size of the LRU and the
zone watermark, which is no indication of whether kswapd should write pages or
not.

This patch tracks if an excessive number of unqueued dirty pages are
being encountered at the end of the LRU.  If so, it indicates that dirty
pages are being recycled before flusher threads can clean them and flags
the zone so that kswapd will start writing pages until the zone is
balanced.
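
In outline, the heuristic can be modelled with the short, self-contained C
sketch below.  This is illustration only, not the kernel code itself: the
type zone_state and the helpers note_unqueued_dirty() and
kswapd_should_writepage() are invented names, while DEF_PRIORITY and the
shift-based threshold mirror the shrink_inactive_list() check in the diff
that follows (the real check additionally requires global, non-memcg
reclaim).

	/*
	 * Model of the heuristic: while shrinking the inactive list, count
	 * dirty pages that are not yet queued for writeback.  If "many" of
	 * the nr_taken pages pulled off the tail of the LRU are such pages,
	 * flag the zone; kswapd then writes dirty file pages itself until
	 * the zone is balanced and the flag is cleared.
	 */
	#include <stdbool.h>

	#define DEF_PRIORITY	12

	struct zone_state {
		bool tail_lru_dirty;		/* models ZONE_TAIL_LRU_DIRTY */
	};

	static void note_unqueued_dirty(struct zone_state *zone, int priority,
					unsigned long nr_unqueued_dirty,
					unsigned long nr_taken)
	{
		/* the threshold scales with scan priority, as in the real check */
		if (nr_unqueued_dirty &&
		    nr_unqueued_dirty >= (nr_taken >> (DEF_PRIORITY - priority)))
			zone->tail_lru_dirty = true;
	}

	/* kswapd writes back a dirty file page only while the flag is set */
	static bool kswapd_should_writepage(const struct zone_state *zone)
	{
		return zone->tail_lru_dirty;
	}

At DEF_PRIORITY the flag is only set if essentially every page taken off the
tail of the LRU is unqueued dirty; as the scan priority drops and reclaim
becomes more aggressive, a progressively smaller fraction is enough.
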
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Tested-by: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9aa41348
include/linux/mmzone.h
@@ -495,6 +495,10 @@ typedef enum {
 	ZONE_CONGESTED,			/* zone has many dirty pages backed by
 					 * a congested BDI
 					 */
+	ZONE_TAIL_LRU_DIRTY,		/* reclaim scanning has recently found
+					 * many dirty file pages at the tail
+					 * of the LRU.
+					 */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -517,6 +521,11 @@ static inline int zone_is_reclaim_congested(const struct zone *zone)
 	return test_bit(ZONE_CONGESTED, &zone->flags);
 }
 
+static inline int zone_is_reclaim_dirty(const struct zone *zone)
+{
+	return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
mm/vmscan.c
@@ -676,13 +676,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
 				      struct scan_control *sc,
 				      enum ttu_flags ttu_flags,
-				      unsigned long *ret_nr_dirty,
+				      unsigned long *ret_nr_unqueued_dirty,
 				      unsigned long *ret_nr_writeback,
 				      bool force_reclaim)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
 	int pgactivate = 0;
+	unsigned long nr_unqueued_dirty = 0;
 	unsigned long nr_dirty = 0;
 	unsigned long nr_congested = 0;
 	unsigned long nr_reclaimed = 0;
@@ -808,14 +809,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageDirty(page)) {
 			nr_dirty++;
 
+			if (!PageWriteback(page))
+				nr_unqueued_dirty++;
+
 			/*
 			 * Only kswapd can writeback filesystem pages to
-			 * avoid risk of stack overflow but do not writeback
-			 * unless under significant pressure.
+			 * avoid risk of stack overflow but only writeback
+			 * if many dirty pages have been encountered.
 			 */
 			if (page_is_file_cache(page) &&
 					(!current_is_kswapd() ||
-					 sc->priority >= DEF_PRIORITY - 2)) {
+					 !zone_is_reclaim_dirty(zone))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()
@@ -960,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 	mem_cgroup_uncharge_end();
-	*ret_nr_dirty += nr_dirty;
+	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
 	*ret_nr_writeback += nr_writeback;
 	return nr_reclaimed;
 }
@@ -1373,6 +1377,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 			(nr_taken >> (DEF_PRIORITY - sc->priority)))
 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
+	/*
+	 * Similarly, if many dirty pages are encountered that are not
+	 * currently being written then flag that kswapd should start
+	 * writing back pages.
+	 */
+	if (global_reclaim(sc) && nr_dirty &&
+			nr_dirty >= (nr_taken >> (DEF_PRIORITY - sc->priority)))
+		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
@@ -2769,8 +2782,12 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				end_zone = i;
 				break;
 			} else {
-				/* If balanced, clear the congested flag */
+				/*
+				 * If balanced, clear the dirty and congested
+				 * flags
+				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
+				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
 			}
 		}
 
@@ -2888,8 +2905,10 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 				 * possible there are dirty pages backed by
 				 * congested BDIs but as pressure is relieved,
 				 * speculatively avoid congestion waits
+				 * or writing pages from kswapd context.
 				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
+				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
 			}
 
 		/*