Commit fe2c2a10 authored by Rik van Riel, committed by Linus Torvalds

vmscan: reclaim at order 0 when compaction is enabled

When built with CONFIG_COMPACTION, kswapd should not try to free
contiguous pages, because it is not trying hard enough to have a real
chance at being successful, but still disrupts the LRU enough to break
other things.

Do not do higher order page isolation unless we really are in lumpy
reclaim mode.

Stop reclaiming pages once we have enough free pages that compaction can
deal with things, and we hit the normal order 0 watermarks used by kswapd.

Also remove a line of code that increments balanced right before exiting
the function.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 67f96aa2
...@@ -1138,7 +1138,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file) ...@@ -1138,7 +1138,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
* @mz: The mem_cgroup_zone to pull pages from. * @mz: The mem_cgroup_zone to pull pages from.
* @dst: The temp list to put pages on to. * @dst: The temp list to put pages on to.
* @nr_scanned: The number of pages that were scanned. * @nr_scanned: The number of pages that were scanned.
* @order: The caller's attempted allocation order * @sc: The scan_control struct for this reclaim session
* @mode: One of the LRU isolation modes * @mode: One of the LRU isolation modes
* @active: True [1] if isolating active pages * @active: True [1] if isolating active pages
* @file: True [1] if isolating file [!anon] pages * @file: True [1] if isolating file [!anon] pages
...@@ -1147,8 +1147,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file) ...@@ -1147,8 +1147,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
*/ */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan, static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct mem_cgroup_zone *mz, struct list_head *dst, struct mem_cgroup_zone *mz, struct list_head *dst,
unsigned long *nr_scanned, int order, isolate_mode_t mode, unsigned long *nr_scanned, struct scan_control *sc,
int active, int file) isolate_mode_t mode, int active, int file)
{ {
struct lruvec *lruvec; struct lruvec *lruvec;
struct list_head *src; struct list_head *src;
...@@ -1194,7 +1194,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, ...@@ -1194,7 +1194,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
BUG(); BUG();
} }
if (!order) if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
continue; continue;
/* /*
...@@ -1208,8 +1208,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, ...@@ -1208,8 +1208,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
*/ */
zone_id = page_zone_id(page); zone_id = page_zone_id(page);
page_pfn = page_to_pfn(page); page_pfn = page_to_pfn(page);
pfn = page_pfn & ~((1 << order) - 1); pfn = page_pfn & ~((1 << sc->order) - 1);
end_pfn = pfn + (1 << order); end_pfn = pfn + (1 << sc->order);
for (; pfn < end_pfn; pfn++) { for (; pfn < end_pfn; pfn++) {
struct page *cursor_page; struct page *cursor_page;
...@@ -1275,7 +1275,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, ...@@ -1275,7 +1275,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
*nr_scanned = scan; *nr_scanned = scan;
trace_mm_vmscan_lru_isolate(order, trace_mm_vmscan_lru_isolate(sc->order,
nr_to_scan, scan, nr_to_scan, scan,
nr_taken, nr_taken,
nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
...@@ -1533,9 +1533,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz, ...@@ -1533,9 +1533,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
&nr_scanned, sc->order, sc, isolate_mode, 0, file);
isolate_mode, 0, file);
if (global_reclaim(sc)) { if (global_reclaim(sc)) {
zone->pages_scanned += nr_scanned; zone->pages_scanned += nr_scanned;
if (current_is_kswapd()) if (current_is_kswapd())
...@@ -1711,8 +1710,7 @@ static void shrink_active_list(unsigned long nr_to_scan, ...@@ -1711,8 +1710,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
&nr_scanned, sc->order,
isolate_mode, 1, file); isolate_mode, 1, file);
if (global_reclaim(sc)) if (global_reclaim(sc))
zone->pages_scanned += nr_scanned; zone->pages_scanned += nr_scanned;
...@@ -2758,7 +2756,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, ...@@ -2758,7 +2756,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
*/ */
for (i = 0; i <= end_zone; i++) { for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i; struct zone *zone = pgdat->node_zones + i;
int nr_slab; int nr_slab, testorder;
unsigned long balance_gap; unsigned long balance_gap;
if (!populated_zone(zone)) if (!populated_zone(zone))
...@@ -2791,7 +2789,20 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, ...@@ -2791,7 +2789,20 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
(zone->present_pages + (zone->present_pages +
KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
KSWAPD_ZONE_BALANCE_GAP_RATIO); KSWAPD_ZONE_BALANCE_GAP_RATIO);
if (!zone_watermark_ok_safe(zone, order, /*
* Kswapd reclaims only single pages with compaction
* enabled. Trying too hard to reclaim until contiguous
* free pages have become available can hurt performance
* by evicting too much useful data from memory.
* Do not reclaim more than needed for compaction.
*/
testorder = order;
if (COMPACTION_BUILD && order &&
compaction_suitable(zone, order) !=
COMPACT_SKIPPED)
testorder = 0;
if (!zone_watermark_ok_safe(zone, testorder,
high_wmark_pages(zone) + balance_gap, high_wmark_pages(zone) + balance_gap,
end_zone, 0)) { end_zone, 0)) {
shrink_zone(priority, zone, &sc); shrink_zone(priority, zone, &sc);
...@@ -2820,7 +2831,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, ...@@ -2820,7 +2831,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
continue; continue;
} }
if (!zone_watermark_ok_safe(zone, order, if (!zone_watermark_ok_safe(zone, testorder,
high_wmark_pages(zone), end_zone, 0)) { high_wmark_pages(zone), end_zone, 0)) {
all_zones_ok = 0; all_zones_ok = 0;
/* /*
...@@ -2917,6 +2928,10 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, ...@@ -2917,6 +2928,10 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (zone->all_unreclaimable && priority != DEF_PRIORITY) if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; continue;
/* Would compaction fail due to lack of free memory? */
if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
goto loop_again;
/* Confirm the zone is balanced for order-0 */ /* Confirm the zone is balanced for order-0 */
if (!zone_watermark_ok(zone, 0, if (!zone_watermark_ok(zone, 0,
high_wmark_pages(zone), 0, 0)) { high_wmark_pages(zone), 0, 0)) {
...@@ -2926,8 +2941,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, ...@@ -2926,8 +2941,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
/* If balanced, clear the congested flag */ /* If balanced, clear the congested flag */
zone_clear_flag(zone, ZONE_CONGESTED); zone_clear_flag(zone, ZONE_CONGESTED);
if (i <= *classzone_idx)
balanced += zone->present_pages;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment