Commit 61317289 authored by Hillf Danton, committed by Linus Torvalds

mm/vmscan.c: cleanup with s/reclaim_mode/isolate_mode/

With many references to reclaim_mode (a field of struct scan_control)
already in the file, it is clearer to rename the local reclaim_mode
variable used to set up the isolation mode.
Signed-off-by: Hillf Danton <dhillf@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c3f0327f
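For illustration, a minimal C sketch of the resulting pattern (all identifiers are taken from the hunks below; this is not an exact excerpt of the patched file): the struct scan_control field sc->reclaim_mode keeps its name, while the local variable that collects isolation flags for isolate_lru_pages() is now named after its type, isolate_mode_t.

	/*
	 * Illustrative sketch only, not part of the patch: the local
	 * variable no longer shadows the name of sc->reclaim_mode.
	 */
	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;

	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		isolate_mode |= ISOLATE_ACTIVE;		/* lumpy reclaim may also take active pages */
	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;	/* skip mapped pages */
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;		/* only isolate clean pages */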
@@ -1509,7 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	unsigned long nr_file;
 	unsigned long nr_dirty = 0;
 	unsigned long nr_writeback = 0;
-	isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
+	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
 	struct zone *zone = mz->zone;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1522,20 +1522,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	set_reclaim_mode(priority, sc, false);
 	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-		reclaim_mode |= ISOLATE_ACTIVE;
+		isolate_mode |= ISOLATE_ACTIVE;
 
 	lru_add_drain();
 
 	if (!sc->may_unmap)
-		reclaim_mode |= ISOLATE_UNMAPPED;
+		isolate_mode |= ISOLATE_UNMAPPED;
 	if (!sc->may_writepage)
-		reclaim_mode |= ISOLATE_CLEAN;
+		isolate_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&zone->lru_lock);
 
 	nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list,
 				     &nr_scanned, sc->order,
-				     reclaim_mode, 0, file);
+				     isolate_mode, 0, file);
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1699,21 +1699,21 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	struct page *page;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	unsigned long nr_rotated = 0;
-	isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
+	isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
 	struct zone *zone = mz->zone;
 
 	lru_add_drain();
 
 	if (!sc->may_unmap)
-		reclaim_mode |= ISOLATE_UNMAPPED;
+		isolate_mode |= ISOLATE_UNMAPPED;
 	if (!sc->may_writepage)
-		reclaim_mode |= ISOLATE_CLEAN;
+		isolate_mode |= ISOLATE_CLEAN;
 
 	spin_lock_irq(&zone->lru_lock);
 
 	nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold,
 				     &nr_scanned, sc->order,
-				     reclaim_mode, 1, file);
+				     isolate_mode, 1, file);
 
 	if (global_reclaim(sc))
 		zone->pages_scanned += nr_scanned;