Commit 65ec02cb authored by Vladimir Davydov, committed by Linus Torvalds

mm: vmscan: move call to shrink_slab() to shrink_zones()

This reduces the indentation level of do_try_to_free_pages() and removes the
extra loop over all eligible zones that counted the number of on-LRU pages.
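
In short, the slab shrink moves from do_try_to_free_pages() into
shrink_zones(), which already walks the eligible zones, so the on-LRU page
count and the shrinker node mask can be gathered in the existing loop.
Condensed sketch of the resulting shrink_zones() (not the literal patched
source; see the diff below for the exact change):

	static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
				 struct shrink_control *shrink)
	{
		unsigned long lru_pages = 0;
		struct reclaim_state *reclaim_state = current->reclaim_state;

		nodes_clear(shrink->nodes_to_scan);
		for_each_zone_zonelist_nodemask(zone, z, zonelist,
						gfp_zone(sc->gfp_mask), sc->nodemask) {
			/* ... zone eligibility checks ... */
			if (global_reclaim(sc)) {
				/* account on-LRU pages in the same pass */
				lru_pages += zone_reclaimable_pages(zone);
				node_set(zone_to_nid(zone), shrink->nodes_to_scan);
			}
			shrink_zone(zone, sc);
		}
		/* shrink slab once per shrink_zones() call, global reclaim only */
		if (global_reclaim(sc)) {
			shrink_slab(shrink, sc->nr_scanned, lru_pages);
			if (reclaim_state) {
				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		return aborted_reclaim;
	}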
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Reviewed-by: Glauber Costa <glommer@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 99120b77
mm/vmscan.c

@@ -2291,13 +2291,16 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  * the caller that it should consider retrying the allocation instead of
  * further reclaim.
  */
-static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
+			 struct shrink_control *shrink)
 {
 	struct zoneref *z;
 	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	unsigned long lru_pages = 0;
 	bool aborted_reclaim = false;
+	struct reclaim_state *reclaim_state = current->reclaim_state;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2307,6 +2310,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	if (buffer_heads_over_limit)
 		sc->gfp_mask |= __GFP_HIGHMEM;
 
+	nodes_clear(shrink->nodes_to_scan);
+
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		if (!populated_zone(zone))
@@ -2318,6 +2323,10 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
+
+			lru_pages += zone_reclaimable_pages(zone);
+			node_set(zone_to_nid(zone), shrink->nodes_to_scan);
+
 			if (sc->priority != DEF_PRIORITY &&
 			    !zone_reclaimable(zone))
 				continue;	/* Let kswapd poll it */
@@ -2354,6 +2363,20 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		shrink_zone(zone, sc);
 	}
 
+	/*
+	 * Don't shrink slabs when reclaiming memory from over limit cgroups
+	 * but do shrink slab at least once when aborting reclaim for
+	 * compaction to avoid unevenly scanning file/anon LRU pages over slab
+	 * pages.
+	 */
+	if (global_reclaim(sc)) {
+		shrink_slab(shrink, sc->nr_scanned, lru_pages);
+		if (reclaim_state) {
+			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+			reclaim_state->reclaimed_slab = 0;
+		}
+	}
+
 	return aborted_reclaim;
 }
 
@@ -2398,9 +2421,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			       struct shrink_control *shrink)
 {
 	unsigned long total_scanned = 0;
-	struct reclaim_state *reclaim_state = current->reclaim_state;
-	struct zoneref *z;
-	struct zone *zone;
 	unsigned long writeback_threshold;
 	bool aborted_reclaim;
 
@@ -2413,34 +2433,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
 				sc->priority);
 		sc->nr_scanned = 0;
-		aborted_reclaim = shrink_zones(zonelist, sc);
+		aborted_reclaim = shrink_zones(zonelist, sc, shrink);
 
-		/*
-		 * Don't shrink slabs when reclaiming memory from over limit
-		 * cgroups but do shrink slab at least once when aborting
-		 * reclaim for compaction to avoid unevenly scanning file/anon
-		 * LRU pages over slab pages.
-		 */
-		if (global_reclaim(sc)) {
-			unsigned long lru_pages = 0;
-
-			nodes_clear(shrink->nodes_to_scan);
-			for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					gfp_zone(sc->gfp_mask), sc->nodemask) {
-				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-					continue;
-
-				lru_pages += zone_reclaimable_pages(zone);
-				node_set(zone_to_nid(zone),
-					 shrink->nodes_to_scan);
-			}
-
-			shrink_slab(shrink, sc->nr_scanned, lru_pages);
-			if (reclaim_state) {
-				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
-				reclaim_state->reclaimed_slab = 0;
-			}
-		}
-
 		total_scanned += sc->nr_scanned;
 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
 			goto out;
...